# Samples from a code-style dataset (columns: code, code_codestyle, style_context, style_context_codestyle, label).
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"
TASK_GUIDES_PATH = "docs/source/en/tasks"


def _find_text_in_file(filename: str, start_prompt: str, end_prompt: str):
    """Return the text between `start_prompt` and `end_prompt`, along with its start/end indices and all lines."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines


# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

TASK_GUIDE_TO_MODELS = {
    "asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
    "audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
    "language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
    "image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
    "masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
    "multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
    "object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
    "question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
    "semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
    "sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
    "summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
    "translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
    "document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
    "monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
    "summarization.md": ("nllb",),
    "translation.md": ("nllb",),
}
def get_model_list_for_task(task_guide: str):
    """Return the list of models supporting a given task, formatted as Markdown doc links."""
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"


def check_model_list_for_task(task_guide: str, overwrite: bool = False):
    """Check (and optionally fix) the auto-generated model list in a task guide."""
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(TASK_GUIDES_PATH, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )
    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(TASK_GUIDES_PATH, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    for task_guide in TASK_GUIDE_TO_MODELS.keys():
        check_model_list_for_task(task_guide, args.fix_and_overwrite)
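# Usage (run from the root of the repo, as noted above):
#   python utils/check_task_guides.py                      # raise if any task guide is stale
#   python utils/check_task_guides.py --fix_and_overwrite  # rewrite the generated tips in place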
from collections.abc import Sequence
from queue import Queue


class SegmentTreeNode:
    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self):
        return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"


class SegmentTree:
    def __init__(self, collection: Sequence, function):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        """Update the element at index i of the underlying collection to `val`."""
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        """Return the result of `function` applied over the inclusive range [i, j]."""
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range entirely in the left child tree
                return self._query_range(node.left, i, j)
            # range spans the left and right child trees
            return self.fn(
                self._query_range(node.left, i, node.mid),
                self._query_range(node.right, node.mid + 1, j),
            )
        # range entirely in the right child tree
        return self._query_range(node.right, i, j)

    def traverse(self):
        """Breadth-first traversal over all nodes of the tree."""
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)


if __name__ == "__main__":
    import operator

    for fn in [operator.add, max, min]:
        print("*" * 50)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
        for node in arr.traverse():
            print(node)
        print()

        arr.update(1, 5)
        for node in arr.traverse():
            print(node)
        print()

        print(arr.query_range(3, 4))  # 7 for operator.add
        print(arr.query_range(2, 2))  # 5 for operator.add
        print(arr.query_range(1, 3))  # 13 for operator.add
        print()
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_dpt_config(checkpoint_url):
    config = DPTConfig(embedding_type="hybrid")

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = "project"

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape


def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name and "backbone" not in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name and "backbone" not in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    if "backbone" in name:
        name = name.replace("backbone", "backbone.bit.encoder")
    if ".." in name:
        name = name.replace("..", ".")
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "convolution" in name and "backbone" in name:
        name = name.replace("convolution", "conv")
    if "layer" in name and "backbone" in name:
        name = name.replace("layer", "layers")
    if "backbone.bit.encoder.bit" in name:
        name = name.replace("backbone.bit.encoder.bit", "backbone.bit")
    if "embedder.conv" in name:
        name = name.replace("embedder.conv", "embedder.convolution")
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace("backbone.bit.encoder.stem.norm", "backbone.bit.embedder.norm")
    return name
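# Hand-traced example of the rename chain above (for orientation only):
#   "pretrained.model.blocks.0.attn.proj.weight"
#     -> "dpt.encoder.blocks.0.attn.proj.weight"               ("pretrained.model" -> "dpt.encoder")
#     -> "dpt.encoder.blocks.0.attention.output.dense.weight"  ("attn.proj" -> "attention.output.dense")
#     -> "dpt.encoder.layer.0.attention.output.dense.weight"   ("blocks" -> "layer")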
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
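# Shape sketch, assuming hidden_size = 768 (the hybrid configuration above): the fused
# timm qkv weight has shape (3 * 768, 768) = (2304, 768) and the bias (2304,); the three
# slices above carve it into query/key/value weights of (768, 768) and biases of (768,).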
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1),
                size=(image.size[1], image.size[0]),
                mode="bicubic",
                align_corners=False,
            )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 255).show()

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("ybelkada/dpt-hybrid-midas")
        image_processor.push_to_hub("ybelkada/dpt-hybrid-midas")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
        type=str,
        help="URL of the original DPT checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=False,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
    )
    parser.add_argument(
        "--model_name",
        default="dpt-large",
        type=str,
        help="Name of the model, in case you're pushing to the hub.",
    )
    parser.add_argument(
        "--show_prediction",
        action="store_true",
    )

    args = parser.parse_args()
    convert_dpt_checkpoint(
        args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
    )
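# Hedged usage sketch (the script filename and output path are illustrative). Note that
# the active `torch.load(checkpoint_url, ...)` call above expects a *local* file path;
# the commented-out `torch.hub.load_state_dict_from_url` line is the variant for URLs.
#   python convert_dpt_checkpoint.py \
#       --checkpoint_url ./dpt_large-midas-2f21e586.pt \
#       --pytorch_dump_folder_path ./dpt-converted --show_prediction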
def euclidean_gcd(a: int, b: int) -> int:
    """Compute the greatest common divisor iteratively via the Euclidean algorithm."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Compute the greatest common divisor recursively: gcd(a, b) = gcd(b, a mod b)."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main():
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")


if __name__ == "__main__":
    main()
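# Worked example of the iterative loop, for euclidean_gcd(48, 18):
#   a=48, b=18  ->  a=18, b=48 % 18 = 12
#   a=18, b=12  ->  a=12, b=18 % 12 = 6
#   a=12, b=6   ->  a=6,  b=12 % 6  = 0
#   b is now falsy, so the result is 6.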
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int, absent: int, late: int) -> int:
    """Count the possible prize strings for the given number of remaining days."""
    # if we have been absent twice, or late three days in a row,
    # the string is no longer a prize string
    if late == 3 or absent == 2:
        return 0

    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1

    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]

    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today

    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)

    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)

    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)

    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings


def solution(days: int = 30) -> int:
    """Return the number of possible prize strings of the given length."""
    return _calculate(days, absent=0, late=0)


if __name__ == "__main__":
    print(solution())
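# A minimal alternative sketch: the hand-rolled `cache` dict above can be replaced with
# functools.lru_cache, since the state (days, absent, late) is hashable.
# (Illustrative only; `_calculate_cached` is a hypothetical name, not part of the original.)
#
#     from functools import lru_cache
#
#     @lru_cache(maxsize=None)
#     def _calculate_cached(days: int, absent: int, late: int) -> int:
#         if late == 3 or absent == 2:
#             return 0
#         if days == 0:
#             return 1
#         return (
#             _calculate_cached(days - 1, absent, late + 1)
#             + _calculate_cached(days - 1, absent + 1, 0)
#             + _calculate_cached(days - 1, absent, 0)
#         )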
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_mae"] = [
        "VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMAEForPreTraining",
        "ViTMAELayer",
        "ViTMAEModel",
        "ViTMAEPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
        "TFViTMAEForPreTraining",
        "TFViTMAEModel",
        "TFViTMAEPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_mae import (
            VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMAEForPreTraining,
            ViTMAELayer,
            ViTMAEModel,
            ViTMAEPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
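# A minimal sketch of the lazy-import pattern used above (illustrative only, not the
# real `_LazyModule`): a module-level `__getattr__` (PEP 562) can defer the import of
# each submodule until one of its exported names is first accessed.
#
#     import importlib
#
#     def __getattr__(name):
#         for submodule, exported_names in _import_structure.items():
#             if name in exported_names:
#                 module = importlib.import_module(f".{submodule}", __name__)
#                 return getattr(module, name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")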
import unittest

import numpy as np

from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
    expand_dims,
    flatten_dict,
    is_flax_available,
    is_tf_available,
    is_torch_available,
    reshape,
    squeeze,
    transpose,
)


if is_flax_available():
    import jax.numpy as jnp

if is_tf_available():
    import tensorflow as tf

if is_torch_available():
    import torch


class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }
        self.assertEqual(flatten_dict(input_dict), expected_dict)

    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
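# A minimal sketch of how such framework-agnostic helpers can be written (illustrative
# only; the real implementations live in `transformers.utils.generic`):
#
#     def transpose(array, axes=None):
#         if isinstance(array, np.ndarray):
#             return np.transpose(array, axes=axes)
#         if is_torch_available() and isinstance(array, torch.Tensor):
#             return array.T if axes is None else array.permute(*axes)
#         raise ValueError(f"Type not supported for transpose: {type(array)}.")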
import argparse
from pathlib import Path

from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration


def consolidate(
    model_type: str,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"

    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path

    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)

    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config

    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type",
        choices=["rag_sequence", "rag_token"],
        required=True,
        type=str,
        help="RAG model type: rag_sequence, rag_token",
    )
    parser.add_argument("--dest", type=str, required=True, help="Path to the output checkpoint directory.")
    parser.add_argument("--generator_name_or_path", type=str, required=True, help="Generator model identifier")
    parser.add_argument(
        "--question_encoder_name_or_path", type=str, required=True, help="Question encoder model identifier"
    )
    parser.add_argument(
        "--generator_tokenizer_name_or_path",
        type=str,
        help="Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``",
    )
    parser.add_argument(
        "--question_encoder_tokenizer_name_or_path",
        type=str,
        help="Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``",
    )
    parser.add_argument(
        "--config_name_or_path",
        type=str,
        help=(
            "Identifier of the model config to use, if not provided, resolves to a base config for a given"
            " ``model_type``"
        ),
    )

    args = parser.parse_args()

    dest_dir = Path(args.dest)
    dest_dir.mkdir(exist_ok=True)

    consolidate(
        args.model_type,
        args.generator_name_or_path,
        args.question_encoder_name_or_path,
        dest_dir,
        args.config_name_or_path,
        args.generator_tokenizer_name_or_path,
        args.question_encoder_tokenizer_name_or_path,
    )
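# Hedged usage sketch (the script filename and model identifiers are illustrative):
#   python consolidate_rag_checkpoint.py --model_type rag_sequence \
#       --generator_name_or_path facebook/bart-large \
#       --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#       --dest ./rag-consolidated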
from typing import Optional

import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
    BaseModelOutputWithNoAttention,
    BaseModelOutputWithPoolingAndNoAttention,
    ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]


class RegNetConvLayer(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
    ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=kernel_size // 2,
            groups=groups,
            bias=False,
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetEmbeddings(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act
        )
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        hidden_state = self.embedder(pixel_values)
        return hidden_state


class RegNetShortCut(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state


class RegNetSELayer(nn.Module):
    def __init__(self, in_channels: int, reduced_channels: int):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced_channels, in_channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, hidden_state):
        pooled = self.pooler(hidden_state)
        attention = self.attention(pooled)
        hidden_state = hidden_state * attention
        return hidden_state


class RegNetXLayer(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetYLayer(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetStage(nn.Module):
    def __init__(
        self,
        config: RegNetConfig,
        in_channels: int,
        out_channels: int,
        stride: int = 2,
        depth: int = 2,
    ):
        super().__init__()
        layer = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride),
            *[layer(config, out_channels, out_channels) for _ in range(depth - 1)],
        )

    def forward(self, hidden_state):
        hidden_state = self.layers(hidden_state)
        return hidden_state


class RegNetEncoder(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))

    def forward(self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True):
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)


class RegNetPreTrainedModel(PreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, RegNetModel):
            module.gradient_checkpointing = value


REGNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )


@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
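# Hedged usage sketch (values are illustrative; RegNet accepts standard image batches):
#   config = RegNetConfig()
#   model = RegNetForImageClassification(config)
#   logits = model(torch.randn(1, 3, 224, 224)).logits  # shape (1, config.num_labels)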
| 18 | import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class a__ ( unittest.TestCase ):
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = {
"task_specific_params": {
"summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
"summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
"summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
}
}
SCREAMING_SNAKE_CASE_ : Any = {
"task_specific_params.summarization.length_penalty": 1.0,
"task_specific_params.summarization.max_length": 128,
"task_specific_params.summarization.min_length": 12,
"task_specific_params.summarization.num_beams": 4,
"task_specific_params.summarization_cnn.length_penalty": 2.0,
"task_specific_params.summarization_cnn.max_length": 142,
"task_specific_params.summarization_cnn.min_length": 56,
"task_specific_params.summarization_cnn.num_beams": 4,
"task_specific_params.summarization_xsum.length_penalty": 1.0,
"task_specific_params.summarization_xsum.max_length": 62,
"task_specific_params.summarization_xsum.min_length": 11,
"task_specific_params.summarization_xsum.num_beams": 6,
}
self.assertEqual(flatten_dict(_A ),_A )
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = np.random.randn(3,4 )
self.assertTrue(np.allclose(transpose(_A ),x.transpose() ) )
SCREAMING_SNAKE_CASE_ : Tuple = np.random.randn(3,4,5 )
self.assertTrue(np.allclose(transpose(_A,axes=(1, 2, 0) ),x.transpose((1, 2, 0) ) ) )
@require_torch
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : Any = torch.tensor(_A )
self.assertTrue(np.allclose(transpose(_A ),transpose(_A ).numpy() ) )
SCREAMING_SNAKE_CASE_ : Dict = np.random.randn(3,4,5 )
SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor(_A )
self.assertTrue(np.allclose(transpose(_A,axes=(1, 2, 0) ),transpose(_A,axes=(1, 2, 0) ).numpy() ) )
@require_tf
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = tf.constant(_A )
self.assertTrue(np.allclose(transpose(_A ),transpose(_A ).numpy() ) )
SCREAMING_SNAKE_CASE_ : List[str] = np.random.randn(3,4,5 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.constant(_A )
self.assertTrue(np.allclose(transpose(_A,axes=(1, 2, 0) ),transpose(_A,axes=(1, 2, 0) ).numpy() ) )
@require_flax
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : Tuple = jnp.array(_A )
self.assertTrue(np.allclose(transpose(_A ),np.asarray(transpose(_A ) ) ) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.random.randn(3,4,5 )
SCREAMING_SNAKE_CASE_ : List[Any] = jnp.array(_A )
self.assertTrue(np.allclose(transpose(_A,axes=(1, 2, 0) ),np.asarray(transpose(_A,axes=(1, 2, 0) ) ) ) )
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = np.random.randn(3,4 )
self.assertTrue(np.allclose(reshape(_A,(4, 3) ),np.reshape(_A,(4, 3) ) ) )
SCREAMING_SNAKE_CASE_ : Any = np.random.randn(3,4,5 )
self.assertTrue(np.allclose(reshape(_A,(12, 5) ),np.reshape(_A,(12, 5) ) ) )
@require_torch
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor(_A )
self.assertTrue(np.allclose(reshape(_A,(4, 3) ),reshape(_A,(4, 3) ).numpy() ) )
SCREAMING_SNAKE_CASE_ : Dict = np.random.randn(3,4,5 )
SCREAMING_SNAKE_CASE_ : int = torch.tensor(_A )
self.assertTrue(np.allclose(reshape(_A,(12, 5) ),reshape(_A,(12, 5) ).numpy() ) )
@require_tf
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = tf.constant(_A )
self.assertTrue(np.allclose(reshape(_A,(4, 3) ),reshape(_A,(4, 3) ).numpy() ) )
SCREAMING_SNAKE_CASE_ : int = np.random.randn(3,4,5 )
SCREAMING_SNAKE_CASE_ : Any = tf.constant(_A )
self.assertTrue(np.allclose(reshape(_A,(12, 5) ),reshape(_A,(12, 5) ).numpy() ) )
@require_flax
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : int = jnp.array(_A )
self.assertTrue(np.allclose(reshape(_A,(4, 3) ),np.asarray(reshape(_A,(4, 3) ) ) ) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.random.randn(3,4,5 )
SCREAMING_SNAKE_CASE_ : Tuple = jnp.array(_A )
self.assertTrue(np.allclose(reshape(_A,(12, 5) ),np.asarray(reshape(_A,(12, 5) ) ) ) )
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = np.random.randn(1,3,4 )
self.assertTrue(np.allclose(squeeze(_A ),np.squeeze(_A ) ) )
SCREAMING_SNAKE_CASE_ : Any = np.random.randn(1,4,1,5 )
self.assertTrue(np.allclose(squeeze(_A,axis=2 ),np.squeeze(_A,axis=2 ) ) )
@require_torch
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = np.random.randn(1,3,4 )
SCREAMING_SNAKE_CASE_ : Any = torch.tensor(_A )
self.assertTrue(np.allclose(squeeze(_A ),squeeze(_A ).numpy() ) )
SCREAMING_SNAKE_CASE_ : Any = np.random.randn(1,4,1,5 )
SCREAMING_SNAKE_CASE_ : Dict = torch.tensor(_A )
self.assertTrue(np.allclose(squeeze(_A,axis=2 ),squeeze(_A,axis=2 ).numpy() ) )
@require_tf
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = np.random.randn(1,3,4 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.constant(_A )
self.assertTrue(np.allclose(squeeze(_A ),squeeze(_A ).numpy() ) )
SCREAMING_SNAKE_CASE_ : Any = np.random.randn(1,4,1,5 )
SCREAMING_SNAKE_CASE_ : Optional[int] = tf.constant(_A )
self.assertTrue(np.allclose(squeeze(_A,axis=2 ),squeeze(_A,axis=2 ).numpy() ) )
@require_flax
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = np.random.randn(1,3,4 )
SCREAMING_SNAKE_CASE_ : List[str] = jnp.array(_A )
self.assertTrue(np.allclose(squeeze(_A ),np.asarray(squeeze(_A ) ) ) )
SCREAMING_SNAKE_CASE_ : str = np.random.randn(1,4,1,5 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = jnp.array(_A )
self.assertTrue(np.allclose(squeeze(_A,axis=2 ),np.asarray(squeeze(_A,axis=2 ) ) ) )
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = np.random.randn(3,4 )
self.assertTrue(np.allclose(expand_dims(_A,axis=1 ),np.expand_dims(_A,axis=1 ) ) )
@require_torch
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor(_A )
self.assertTrue(np.allclose(expand_dims(_A,axis=1 ),expand_dims(_A,axis=1 ).numpy() ) )
@require_tf
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : Optional[int] = tf.constant(_A )
self.assertTrue(np.allclose(expand_dims(_A,axis=1 ),expand_dims(_A,axis=1 ).numpy() ) )
@require_flax
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = jnp.array(_A )
self.assertTrue(np.allclose(expand_dims(_A,axis=1 ),np.asarray(expand_dims(_A,axis=1 ) ) ) )
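# The assertions above exercise framework-agnostic helpers (transpose, reshape,
# squeeze, expand_dims) that accept NumPy, PyTorch, TensorFlow, or JAX tensors
# alike. A minimal sketch of the duck-typed dispatch they rely on, assuming only
# NumPy is installed; the torch/tensorflow branches are illustrative and not the
# library's actual implementation:
import numpy as np

def transpose_sketch(array, axes=None):
    # Dispatch on the array's defining module; fall back to NumPy.
    module = type(array).__module__
    if module.startswith("torch"):
        return array.permute(*axes) if axes is not None else array.t()  # hypothetical branch
    if module.startswith("tensorflow"):
        import tensorflow as tf
        return tf.transpose(array, perm=axes)
    return np.transpose(array, axes=axes)

x = np.random.randn(3, 4, 5)
assert transpose_sketch(x, axes=(1, 2, 0)).shape == (4, 5, 3)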
| 18 | 1 |
from __future__ import annotations
from collections.abc import Iterator
class a__ :
def __init__( self : int,_A : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = value
SCREAMING_SNAKE_CASE_ : Node | None = None
SCREAMING_SNAKE_CASE_ : Node | None = None
class a__ :
def __init__( self : Optional[int],_A : Node ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = tree
def __UpperCamelCase ( self : Any,_A : Node | None ):
"""simple docstring"""
if node is None:
return 0
return node.value + (
self.depth_first_search(node.left ) + self.depth_first_search(node.right )
)
def __iter__( self : Dict ):
"""simple docstring"""
yield self.depth_first_search(self.tree )
if __name__ == "__main__":
import doctest
doctest.testmod()
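# Quick usage check for the tree iterator above: iterating the tree yields the
# sum of all node values from the recursive depth-first traversal. A
# self-contained restatement with a minimal node class (attribute names match
# the class above):
class _Node:
    def __init__(self, value):
        self.value = value
        self.left = None
        self.right = None

def dfs_sum(node):
    # Mirrors depth_first_search: this node's value plus both subtrees.
    if node is None:
        return 0
    return node.value + dfs_sum(node.left) + dfs_sum(node.right)

root = _Node(10)
root.left = _Node(5)
root.right = _Node(-3)
assert dfs_sum(root) == 12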
| 18 | import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__lowerCamelCase : List[str] = logging.get_logger(__name__)
__lowerCamelCase : Tuple = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
__lowerCamelCase : List[Any] = {
'''vocab_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
},
}
__lowerCamelCase : int = {
'''allenai/longformer-base-4096''': 40_96,
'''allenai/longformer-large-4096''': 40_96,
'''allenai/longformer-large-4096-finetuned-triviaqa''': 40_96,
'''allenai/longformer-base-4096-extra.pos.embd.only''': 40_96,
'''allenai/longformer-large-4096-extra.pos.embd.only''': 40_96,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def _snake_case ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
SCREAMING_SNAKE_CASE_ : str = bs[:]
SCREAMING_SNAKE_CASE_ : Optional[int] = 0
for b in range(2**8 ):
if b not in bs:
bs.append(lowerCAmelCase )
cs.append(2**8 + n )
n += 1
SCREAMING_SNAKE_CASE_ : List[str] = [chr(lowerCAmelCase ) for n in cs]
return dict(zip(lowerCAmelCase , lowerCAmelCase ) )
def _snake_case ( lowerCAmelCase : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = set()
SCREAMING_SNAKE_CASE_ : Tuple = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
SCREAMING_SNAKE_CASE_ : List[str] = char
return pairs
class a__ ( A__ ):
A = VOCAB_FILES_NAMES
A = PRETRAINED_VOCAB_FILES_MAP
A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A = ['input_ids', 'attention_mask']
def __init__( self : Union[str, Any],_A : List[Any],_A : Tuple,_A : str="replace",_A : Optional[int]="<s>",_A : Dict="</s>",_A : Any="</s>",_A : Optional[Any]="<s>",_A : Union[str, Any]="<unk>",_A : int="<pad>",_A : Dict="<mask>",_A : int=False,**_A : Dict,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else bos_token
SCREAMING_SNAKE_CASE_ : Optional[int] = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else eos_token
SCREAMING_SNAKE_CASE_ : str = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else sep_token
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else cls_token
SCREAMING_SNAKE_CASE_ : List[str] = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else unk_token
SCREAMING_SNAKE_CASE_ : Optional[Any] = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE_ : Dict = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else mask_token
super().__init__(
errors=_A,bos_token=_A,eos_token=_A,unk_token=_A,sep_token=_A,cls_token=_A,pad_token=_A,mask_token=_A,add_prefix_space=_A,**_A,)
with open(_A,encoding="utf-8" ) as vocab_handle:
SCREAMING_SNAKE_CASE_ : Tuple = json.load(_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE_ : Any = errors # how to handle errors in decoding
SCREAMING_SNAKE_CASE_ : Optional[Any] = bytes_to_unicode()
SCREAMING_SNAKE_CASE_ : str = {v: k for k, v in self.byte_encoder.items()}
with open(_A,encoding="utf-8" ) as merges_handle:
SCREAMING_SNAKE_CASE_ : int = merges_handle.read().split("\n" )[1:-1]
SCREAMING_SNAKE_CASE_ : List[str] = [tuple(merge.split() ) for merge in bpe_merges]
SCREAMING_SNAKE_CASE_ : Optional[int] = dict(zip(_A,range(len(_A ) ) ) )
SCREAMING_SNAKE_CASE_ : Any = {}
SCREAMING_SNAKE_CASE_ : List[str] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
SCREAMING_SNAKE_CASE_ : List[Any] = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
return len(self.encoder )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
return dict(self.encoder,**self.added_tokens_encoder )
def __UpperCamelCase ( self : Any,_A : int ):
"""simple docstring"""
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tuple(_A )
SCREAMING_SNAKE_CASE_ : str = get_pairs(_A )
if not pairs:
return token
while True:
SCREAMING_SNAKE_CASE_ : Tuple = min(_A,key=lambda _A : self.bpe_ranks.get(_A,float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = bigram
SCREAMING_SNAKE_CASE_ : int = []
SCREAMING_SNAKE_CASE_ : Dict = 0
while i < len(_A ):
try:
SCREAMING_SNAKE_CASE_ : Tuple = word.index(_A,_A )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
SCREAMING_SNAKE_CASE_ : str = j
if word[i] == first and i < len(_A ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
SCREAMING_SNAKE_CASE_ : Dict = tuple(_A )
SCREAMING_SNAKE_CASE_ : List[str] = new_word
if len(_A ) == 1:
break
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = get_pairs(_A )
SCREAMING_SNAKE_CASE_ : List[str] = " ".join(_A )
SCREAMING_SNAKE_CASE_ : Any = word
return word
def __UpperCamelCase ( self : Dict,_A : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = []
for token in re.findall(self.pat,_A ):
SCREAMING_SNAKE_CASE_ : Any = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_A ).split(" " ) )
return bpe_tokens
def __UpperCamelCase ( self : Optional[int],_A : str ):
"""simple docstring"""
return self.encoder.get(_A,self.encoder.get(self.unk_token ) )
def __UpperCamelCase ( self : Tuple,_A : str ):
"""simple docstring"""
return self.decoder.get(_A )
def __UpperCamelCase ( self : List[str],_A : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = "".join(_A )
SCREAMING_SNAKE_CASE_ : Tuple = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8",errors=self.errors )
return text
def __UpperCamelCase ( self : List[Any],_A : str,_A : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(_A ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
SCREAMING_SNAKE_CASE_ : Tuple = os.path.join(
_A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
SCREAMING_SNAKE_CASE_ : Any = os.path.join(
_A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(_A,"w",encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder,indent=2,sort_keys=_A,ensure_ascii=_A ) + "\n" )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0
with open(_A,"w",encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items(),key=lambda _A : _A[1] ):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
" Please check that the tokenizer is not corrupted!" )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = token_index
writer.write(" ".join(_A ) + "\n" )
index += 1
return vocab_file, merge_file
def __UpperCamelCase ( self : Optional[Any],_A : List[int],_A : Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : str = [self.cls_token_id]
SCREAMING_SNAKE_CASE_ : Tuple = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __UpperCamelCase ( self : str,_A : List[int],_A : Optional[List[int]] = None,_A : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A,token_ids_a=_A,already_has_special_tokens=_A )
if token_ids_a is None:
return [1] + ([0] * len(_A )) + [1]
return [1] + ([0] * len(_A )) + [1, 1] + ([0] * len(_A )) + [1]
def __UpperCamelCase ( self : Any,_A : List[int],_A : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCamelCase ( self : Any,_A : Union[str, Any],_A : Any=False,**_A : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = kwargs.pop("add_prefix_space",self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_A ) > 0 and not text[0].isspace()):
SCREAMING_SNAKE_CASE_ : str = " " + text
return (text, kwargs)
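# The byte-level BPE above hinges on two helpers that are hard to read in this
# form: bytes_to_unicode builds a reversible byte -> printable-character table
# (whitespace and control bytes are remapped past 0x100 so every byte prints),
# and get_pairs collects adjacent symbol pairs for merge ranking. A cleaned-up,
# self-contained sketch of both:
def bytes_to_unicode_sketch():
    bs = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:  # remap non-printable bytes into the 256+ range
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    return dict(zip(bs, [chr(c) for c in cs]))

def get_pairs_sketch(word):
    return {(a, b) for a, b in zip(word, word[1:])}

table = bytes_to_unicode_sketch()
assert len(table) == 256 and table[ord("A")] == "A"
assert get_pairs_sketch(("l", "o", "w")) == {("l", "o"), ("o", "w")}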
| 18 | 1 |
from math import isqrt
def _snake_case ( lowerCAmelCase : int ):
"""simple docstring"""
return all(number % divisor != 0 for divisor in range(2 , isqrt(lowerCAmelCase ) + 1 ) )
def _snake_case ( lowerCAmelCase : int = 1_0**6 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = 0
SCREAMING_SNAKE_CASE_ : Optional[int] = 1
SCREAMING_SNAKE_CASE_ : Optional[Any] = 7
while prime_candidate < max_prime:
primes_count += is_prime(lowerCAmelCase )
cube_index += 1
prime_candidate += 6 * cube_index
return primes_count
if __name__ == "__main__":
print(f'''{solution() = }''')
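# The loop above only ever tests numbers of the form (k + 1)**3 - k**3
# = 3k**2 + 3k + 1, i.e. differences of consecutive cubes (7, 19, 37, 61, ...):
# incrementing cube_index and then adding 6 * cube_index steps from one such
# difference to the next. A quick check of the identity:
for k in range(1, 6):
    assert (k + 1) ** 3 - k**3 == 3 * k * k + 3 * k + 1
print([(k + 1) ** 3 - k**3 for k in range(1, 6)])  # [7, 19, 37, 61, 91]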
| 18 | from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class a__ :
def __init__( self : Optional[int],_A : Dict,_A : List[str]=13,_A : List[str]=7,_A : int=True,_A : str=True,_A : Union[str, Any]=True,_A : Tuple=True,_A : Dict=99,_A : Tuple=32,_A : Tuple=2,_A : Tuple=4,_A : Optional[Any]=37,_A : str="gelu",_A : Dict=0.1,_A : List[Any]=0.1,_A : List[str]=512,_A : str=16,_A : int=2,_A : Dict=0.02,_A : List[Any]=3,_A : Optional[Any]=4,_A : Optional[int]=None,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = parent
SCREAMING_SNAKE_CASE_ : Any = 13
SCREAMING_SNAKE_CASE_ : List[str] = 7
SCREAMING_SNAKE_CASE_ : Dict = True
SCREAMING_SNAKE_CASE_ : Optional[Any] = True
SCREAMING_SNAKE_CASE_ : Tuple = True
SCREAMING_SNAKE_CASE_ : List[str] = True
SCREAMING_SNAKE_CASE_ : List[str] = 99
SCREAMING_SNAKE_CASE_ : Tuple = 384
SCREAMING_SNAKE_CASE_ : Optional[Any] = 2
SCREAMING_SNAKE_CASE_ : Any = 4
SCREAMING_SNAKE_CASE_ : str = 37
SCREAMING_SNAKE_CASE_ : Optional[Any] = "gelu"
SCREAMING_SNAKE_CASE_ : List[Any] = 0.1
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0.1
SCREAMING_SNAKE_CASE_ : Dict = 512
SCREAMING_SNAKE_CASE_ : int = 16
SCREAMING_SNAKE_CASE_ : Optional[int] = 2
SCREAMING_SNAKE_CASE_ : Any = 0.02
SCREAMING_SNAKE_CASE_ : str = 3
SCREAMING_SNAKE_CASE_ : int = 4
SCREAMING_SNAKE_CASE_ : Dict = 128
SCREAMING_SNAKE_CASE_ : Any = 2
SCREAMING_SNAKE_CASE_ : Tuple = 9
SCREAMING_SNAKE_CASE_ : List[Any] = 1
SCREAMING_SNAKE_CASE_ : Any = None
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
SCREAMING_SNAKE_CASE_ : Any = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ : List[str] = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE_ : List[Any] = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
SCREAMING_SNAKE_CASE_ : Dict = None
SCREAMING_SNAKE_CASE_ : Dict = None
SCREAMING_SNAKE_CASE_ : str = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size],self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([self.batch_size, self.seq_length],self.num_labels )
SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size],self.num_choices )
SCREAMING_SNAKE_CASE_ : Any = ConvBertConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,initializer_range=self.initializer_range,return_dict=_A,)
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase ( self : Optional[int],_A : List[Any],_A : int,_A : Tuple,_A : Optional[int],_A : Union[str, Any],_A : Union[str, Any],_A : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = TFConvBertModel(config=_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
SCREAMING_SNAKE_CASE_ : str = [input_ids, input_mask]
SCREAMING_SNAKE_CASE_ : List[str] = model(_A )
SCREAMING_SNAKE_CASE_ : Dict = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self : Dict,_A : Dict,_A : int,_A : Union[str, Any],_A : List[Any],_A : int,_A : str,_A : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = TFConvBertForMaskedLM(config=_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE_ : List[Any] = model(_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self : Any,_A : Optional[int],_A : List[Any],_A : Union[str, Any],_A : List[Any],_A : Union[str, Any],_A : Optional[int],_A : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self.num_labels
SCREAMING_SNAKE_CASE_ : Any = TFConvBertForSequenceClassification(config=_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : int,_A : int,_A : Dict,_A : List[str],_A : Tuple,_A : Dict,_A : Optional[int],_A : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.num_choices
SCREAMING_SNAKE_CASE_ : Optional[int] = TFConvBertForMultipleChoice(config=_A )
SCREAMING_SNAKE_CASE_ : Any = tf.tile(tf.expand_dims(_A,1 ),(1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE_ : Any = tf.tile(tf.expand_dims(_A,1 ),(1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.tile(tf.expand_dims(_A,1 ),(1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE_ : int = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
SCREAMING_SNAKE_CASE_ : int = model(_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_choices) )
def __UpperCamelCase ( self : List[Any],_A : Union[str, Any],_A : int,_A : Optional[int],_A : str,_A : str,_A : Tuple,_A : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.num_labels
SCREAMING_SNAKE_CASE_ : Union[str, Any] = TFConvBertForTokenClassification(config=_A )
SCREAMING_SNAKE_CASE_ : Tuple = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE_ : str = model(_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase ( self : List[Any],_A : int,_A : List[str],_A : List[Any],_A : Any,_A : Optional[int],_A : List[str],_A : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = TFConvBertForQuestionAnswering(config=_A )
SCREAMING_SNAKE_CASE_ : Dict = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE_ : Any = model(_A )
self.parent.assertEqual(result.start_logits.shape,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape,(self.batch_size, self.seq_length) )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = config_and_inputs
SCREAMING_SNAKE_CASE_ : Any = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class a__ ( A__ , A__ , unittest.TestCase ):
A = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
A = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
A = False
A = False
A = False
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = TFConvBertModelTester(self )
SCREAMING_SNAKE_CASE_ : Tuple = ConfigTester(self,config_class=_A,hidden_size=37 )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_A )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_A )
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_A )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_A )
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_A )
@slow
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : List[str] = True
SCREAMING_SNAKE_CASE_ : Any = True
if hasattr(_A,"use_cache" ):
SCREAMING_SNAKE_CASE_ : List[Any] = True
SCREAMING_SNAKE_CASE_ : int = getattr(self.model_tester,"encoder_seq_length",self.model_tester.seq_length )
SCREAMING_SNAKE_CASE_ : Optional[Any] = getattr(self.model_tester,"key_length",_A )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : List[str] = self._prepare_for_class(_A,_A )
SCREAMING_SNAKE_CASE_ : List[Any] = model_class(_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = len(model(_A ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_A,saved_model=_A )
SCREAMING_SNAKE_CASE_ : Optional[Any] = os.path.join(_A,"saved_model","1" )
SCREAMING_SNAKE_CASE_ : Tuple = tf.keras.models.load_model(_A )
SCREAMING_SNAKE_CASE_ : str = model(_A )
if self.is_encoder_decoder:
SCREAMING_SNAKE_CASE_ : Optional[Any] = outputs["encoder_hidden_states"]
SCREAMING_SNAKE_CASE_ : str = outputs["encoder_attentions"]
else:
SCREAMING_SNAKE_CASE_ : Any = outputs["hidden_states"]
SCREAMING_SNAKE_CASE_ : List[str] = outputs["attentions"]
self.assertEqual(len(_A ),_A )
SCREAMING_SNAKE_CASE_ : Any = getattr(
self.model_tester,"expected_num_hidden_layers",self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_A ),_A )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ),[self.model_tester.seq_length, self.model_tester.hidden_size],)
self.assertEqual(len(_A ),self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],)
@slow
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
self.assertIsNotNone(_A )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : List[str] = True
SCREAMING_SNAKE_CASE_ : List[str] = getattr(self.model_tester,"decoder_seq_length",self.model_tester.seq_length )
SCREAMING_SNAKE_CASE_ : Any = getattr(self.model_tester,"encoder_seq_length",self.model_tester.seq_length )
SCREAMING_SNAKE_CASE_ : Optional[int] = getattr(self.model_tester,"key_length",_A )
SCREAMING_SNAKE_CASE_ : int = getattr(self.model_tester,"key_length",_A )
def check_decoder_attentions_output(_A : Dict ):
SCREAMING_SNAKE_CASE_ : int = len(_A )
self.assertEqual(out_len % 2,0 )
SCREAMING_SNAKE_CASE_ : Tuple = outputs.decoder_attentions
self.assertEqual(len(_A ),self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],)
def check_encoder_attentions_output(_A : Tuple ):
SCREAMING_SNAKE_CASE_ : int = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(_A ),self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],)
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : Optional[Any] = True
SCREAMING_SNAKE_CASE_ : Optional[Any] = False
SCREAMING_SNAKE_CASE_ : Tuple = model_class(_A )
SCREAMING_SNAKE_CASE_ : Any = model(self._prepare_for_class(_A,_A ) )
SCREAMING_SNAKE_CASE_ : Tuple = len(_A )
self.assertEqual(config.output_hidden_states,_A )
check_encoder_attentions_output(_A )
if self.is_encoder_decoder:
SCREAMING_SNAKE_CASE_ : Optional[Any] = model_class(_A )
SCREAMING_SNAKE_CASE_ : int = model(self._prepare_for_class(_A,_A ) )
self.assertEqual(config.output_hidden_states,_A )
check_decoder_attentions_output(_A )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE_ : str = True
SCREAMING_SNAKE_CASE_ : int = model_class(_A )
SCREAMING_SNAKE_CASE_ : List[str] = model(self._prepare_for_class(_A,_A ) )
self.assertEqual(config.output_hidden_states,_A )
check_encoder_attentions_output(_A )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE_ : str = True
SCREAMING_SNAKE_CASE_ : int = True
SCREAMING_SNAKE_CASE_ : Dict = model_class(_A )
SCREAMING_SNAKE_CASE_ : str = model(self._prepare_for_class(_A,_A ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1),len(_A ) )
self.assertEqual(model.config.output_hidden_states,_A )
check_encoder_attentions_output(_A )
@require_tf
class a__ ( unittest.TestCase ):
@slow
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
SCREAMING_SNAKE_CASE_ : int = tf.constant([[0, 1, 2, 3, 4, 5]] )
SCREAMING_SNAKE_CASE_ : Tuple = model(_A )[0]
SCREAMING_SNAKE_CASE_ : List[Any] = [1, 6, 768]
self.assertEqual(output.shape,_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.constant(
[
[
[-0.03475493, -0.4686034, -0.30638832],
[0.22637248, -0.26988646, -0.7423424],
[0.10324868, -0.45013508, -0.58280784],
]
] )
tf.debugging.assert_near(output[:, :3, :3],_A,atol=1E-4 )
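# Note on the "num_attention_heads / 2" in the shape checks above: ConvBERT
# splits its heads between self-attention and a span-based dynamic convolution
# branch, so with the default head_ratio of 2 only half the configured heads
# appear in the returned attention maps. Illustrative arithmetic only:
num_attention_heads, head_ratio = 4, 2
assert num_attention_heads / head_ratio == 2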
| 18 | 1 |
def _snake_case ( lowerCAmelCase : int = 1_0_0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = n * (n + 1) * (2 * n + 1) / 6
SCREAMING_SNAKE_CASE_ : int = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(f'''{solution() = }''')
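# For reference: n(n + 1)(2n + 1)/6 above is the sum of the first n squares and
# (n(n + 1)/2)**2 is the square of their sum. A brute-force cross-check:
def sum_square_difference_naive(n: int) -> int:
    numbers = range(1, n + 1)
    return sum(numbers) ** 2 - sum(i * i for i in numbers)

assert sum_square_difference_naive(10) == 2640
assert sum_square_difference_naive(100) == 25164150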
| 18 | def _snake_case ( lowerCAmelCase : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = int(lowerCAmelCase )
if decimal in (0, 1): # Exit cases for the recursion
return str(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = divmod(lowerCAmelCase , 2 )
return binary_recursive(lowerCAmelCase ) + str(lowerCAmelCase )
def _snake_case ( lowerCAmelCase : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = str(lowerCAmelCase ).strip()
if not number:
raise ValueError("No input value was provided" )
SCREAMING_SNAKE_CASE_ : List[str] = "-" if number.startswith("-" ) else ""
SCREAMING_SNAKE_CASE_ : Optional[Any] = number.lstrip("-" )
if not number.isnumeric():
raise ValueError("Input value is not an integer" )
return f'{negative}0b{binary_recursive(int(lowerCAmelCase ) )}'
if __name__ == "__main__":
from doctest import testmod
testmod()
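# A compact, self-contained restatement of the recursion above, cross-checked
# against Python's built-in bin():
def to_binary(decimal: int) -> str:
    if decimal in (0, 1):  # base case, as in the version above
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return to_binary(div) + str(mod)

for value in (0, 1, 5, 64, 2023):
    assert "0b" + to_binary(value) == bin(value)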
| 18 | 1 |
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
__lowerCamelCase : Optional[int] = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class a__ ( A__ ):
def __init__( self : Optional[int],*_A : Tuple,_A : List[Any]=None,_A : Tuple=None,_A : Optional[int]=None,**_A : Union[str, Any] ):
"""simple docstring"""
super().__init__(*_A,**_A )
SCREAMING_SNAKE_CASE_ : Tuple = eval_examples
SCREAMING_SNAKE_CASE_ : str = post_process_function
SCREAMING_SNAKE_CASE_ : Dict = quant_trainer_args
SCREAMING_SNAKE_CASE_ : Optional[int] = 128 # default number of calibration samples
def __UpperCamelCase ( self : Optional[int],_A : Optional[Any]=None ):
"""simple docstring"""
if calib_dataset is None and self.calib_dataset is None:
raise ValueError("Trainer: calibration requires an calib_dataset." )
SCREAMING_SNAKE_CASE_ : Optional[Any] = calib_dataset if calib_dataset is not None else self.calib_dataset
SCREAMING_SNAKE_CASE_ : Optional[Any] = self._remove_unused_columns(_A,description="Calibration" )
return DataLoader(
_A,batch_size=self.args.eval_batch_size,collate_fn=self.data_collator,drop_last=self.args.dataloader_drop_last,num_workers=self.args.dataloader_num_workers,pin_memory=self.args.dataloader_pin_memory,shuffle=_A,)
def __UpperCamelCase ( self : List[Any],_A : Optional[Any]=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.train_dataset if calib_dataset is None else calib_dataset
SCREAMING_SNAKE_CASE_ : Dict = self.get_calib_dataloader(_A )
SCREAMING_SNAKE_CASE_ : List[Any] = self.model
quant_trainer.configure_model(_A,self.quant_trainer_args,calib=_A )
model.eval()
quant_trainer.enable_calibration(_A )
logger.info("***** Running calibration *****" )
logger.info(F' Num examples = {self.calib_num}' )
logger.info(F' Batch size = {calib_dataloader.batch_size}' )
for step, inputs in enumerate(_A ):
# Prediction step
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = self.prediction_step(_A,_A,prediction_loss_only=_A )
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
quant_trainer.finish_calibration(_A,self.quant_trainer_args )
SCREAMING_SNAKE_CASE_ : Any = model
def __UpperCamelCase ( self : Optional[Any],_A : Dict=None,_A : List[str]=None,_A : Union[str, Any]=None,_A : str = "eval" ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = self.eval_dataset if eval_dataset is None else eval_dataset
SCREAMING_SNAKE_CASE_ : Any = self.get_eval_dataloader(_A )
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
SCREAMING_SNAKE_CASE_ : List[str] = self.compute_metrics
SCREAMING_SNAKE_CASE_ : List[Any] = None
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
SCREAMING_SNAKE_CASE_ : str = eval_loop(
_A,description="Evaluation",prediction_loss_only=True if compute_metrics is None else None,ignore_keys=_A,)
finally:
SCREAMING_SNAKE_CASE_ : List[Any] = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
SCREAMING_SNAKE_CASE_ : Any = self.post_process_function(_A,_A,output.predictions )
SCREAMING_SNAKE_CASE_ : Any = self.compute_metrics(_A )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'{metric_key_prefix}_' ):
SCREAMING_SNAKE_CASE_ : str = metrics.pop(_A )
self.log(_A )
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
SCREAMING_SNAKE_CASE_ : List[Any] = self.callback_handler.on_evaluate(self.args,self.state,self.control,_A )
return metrics
def __UpperCamelCase ( self : Any,_A : Optional[Any],_A : List[Any],_A : str=None,_A : str = "test" ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.get_test_dataloader(_A )
# Temporarily disable metric computation, we will do it in the loop here.
SCREAMING_SNAKE_CASE_ : Dict = self.compute_metrics
SCREAMING_SNAKE_CASE_ : Tuple = None
SCREAMING_SNAKE_CASE_ : List[str] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
SCREAMING_SNAKE_CASE_ : Any = eval_loop(
_A,description="Prediction",prediction_loss_only=True if compute_metrics is None else None,ignore_keys=_A,)
finally:
SCREAMING_SNAKE_CASE_ : List[str] = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
SCREAMING_SNAKE_CASE_ : List[str] = self.post_process_function(_A,_A,output.predictions,"predict" )
SCREAMING_SNAKE_CASE_ : int = self.compute_metrics(_A )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'{metric_key_prefix}_' ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = metrics.pop(_A )
return PredictionOutput(predictions=predictions.predictions,label_ids=predictions.label_ids,metrics=_A )
def __UpperCamelCase ( self : Optional[int],_A : Any="./" ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = self.eval_dataset
SCREAMING_SNAKE_CASE_ : List[Any] = self.get_eval_dataloader(_A )
SCREAMING_SNAKE_CASE_ : int = next(iter(_A ) )
# saving device - to make it consistent
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
# convert to tuple
SCREAMING_SNAKE_CASE_ : List[Any] = tuple(v.to(_A ) for k, v in batch.items() )
logger.info("Converting model to be onnx compatible" )
from pytorch_quantization.nn import TensorQuantizer
SCREAMING_SNAKE_CASE_ : Dict = True
SCREAMING_SNAKE_CASE_ : int = self.model.to(_A )
model.eval()
model.float()
SCREAMING_SNAKE_CASE_ : Any = model.module if hasattr(_A,"module" ) else model
quant_trainer.configure_model(_A,self.quant_trainer_args )
SCREAMING_SNAKE_CASE_ : int = os.path.join(_A,"model.onnx" )
logger.info(F'exporting model to {output_model_file}' )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {0: "batch_size", 1: "seq_len"}
torch.onnx.export(
_A,_A,_A,export_params=_A,opset_version=13,do_constant_folding=_A,input_names=["input_ids", "attention_mask", "token_type_ids"],output_names=["output_start_logits", "output_end_logits"],dynamic_axes={
"input_ids": axes,
"attention_mask": axes,
"token_type_ids": axes,
"output_start_logits": axes,
"output_end_logits": axes,
},verbose=_A,)
logger.info("onnx export finished" )
| 18 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__lowerCamelCase : Union[str, Any] = {
'''configuration_chinese_clip''': [
'''CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ChineseCLIPConfig''',
'''ChineseCLIPOnnxConfig''',
'''ChineseCLIPTextConfig''',
'''ChineseCLIPVisionConfig''',
],
'''processing_chinese_clip''': ['''ChineseCLIPProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Any = ['''ChineseCLIPFeatureExtractor''']
__lowerCamelCase : Optional[int] = ['''ChineseCLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : int = [
'''CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ChineseCLIPModel''',
'''ChineseCLIPPreTrainedModel''',
'''ChineseCLIPTextModel''',
'''ChineseCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
__lowerCamelCase : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
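# _LazyModule defers the heavy torch/vision imports above until an attribute is
# first accessed. The same effect can be sketched at module level with PEP 562's
# module-level __getattr__ (the mapping below is illustrative only):
import importlib

_LAZY_ATTRS = {"ChineseCLIPProcessor": ".processing_chinese_clip"}

def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")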
| 18 | 1 |
import argparse
import os
import re
import packaging.version
__lowerCamelCase : str = '''examples/'''
__lowerCamelCase : Dict = {
'''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'''\1version="VERSION",'''),
'''doc''': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
__lowerCamelCase : Tuple = {
'''init''': '''src/diffusers/__init__.py''',
'''setup''': '''setup.py''',
}
__lowerCamelCase : str = '''README.md'''
def _snake_case ( lowerCAmelCase : str , lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
with open(lowerCAmelCase , "r" , encoding="utf-8" , newline="\n" ) as f:
SCREAMING_SNAKE_CASE_ : List[Any] = f.read()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = REPLACE_PATTERNS[pattern]
SCREAMING_SNAKE_CASE_ : str = replace.replace("VERSION" , lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = re_pattern.sub(lowerCAmelCase , lowerCAmelCase )
with open(lowerCAmelCase , "w" , encoding="utf-8" , newline="\n" ) as f:
f.write(lowerCAmelCase )
def _snake_case ( lowerCAmelCase : List[str] ):
"""simple docstring"""
for folder, directories, fnames in os.walk(lowerCAmelCase ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("research_projects" )
if "legacy" in directories:
directories.remove("legacy" )
for fname in fnames:
if fname.endswith(".py" ):
update_version_in_file(os.path.join(lowerCAmelCase , lowerCAmelCase ) , lowerCAmelCase , pattern="examples" )
def _snake_case ( lowerCAmelCase : str , lowerCAmelCase : Optional[int]=False ):
"""simple docstring"""
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
if not patch:
update_version_in_examples(lowerCAmelCase )
def _snake_case ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = "🤗 Transformers currently provides the following architectures"
SCREAMING_SNAKE_CASE_ : Tuple = "1. Want to contribute a new model?"
with open(lowerCAmelCase , "r" , encoding="utf-8" , newline="\n" ) as f:
SCREAMING_SNAKE_CASE_ : Optional[Any] = f.readlines()
# Find the start of the list.
SCREAMING_SNAKE_CASE_ : Optional[Any] = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
SCREAMING_SNAKE_CASE_ : Any = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("1." ):
SCREAMING_SNAKE_CASE_ : Any = lines[index].replace(
"https://huggingface.co/docs/diffusers/main/model_doc" , "https://huggingface.co/docs/diffusers/model_doc" , )
index += 1
with open(lowerCAmelCase , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(lowerCAmelCase )
def _snake_case ( ):
"""simple docstring"""
with open(REPLACE_FILES["init"] , "r" ) as f:
SCREAMING_SNAKE_CASE_ : Any = f.read()
SCREAMING_SNAKE_CASE_ : Optional[int] = REPLACE_PATTERNS["init"][0].search(lowerCAmelCase ).groups()[0]
return packaging.version.parse(lowerCAmelCase )
def _snake_case ( lowerCAmelCase : int=False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = get_version()
if patch and default_version.is_devrelease:
raise ValueError("Can't create a patch version from the dev branch, checkout a released version!" )
if default_version.is_devrelease:
SCREAMING_SNAKE_CASE_ : str = default_version.base_version
elif patch:
SCREAMING_SNAKE_CASE_ : Tuple = f'{default_version.major}.{default_version.minor}.{default_version.micro + 1}'
else:
SCREAMING_SNAKE_CASE_ : int = f'{default_version.major}.{default_version.minor + 1}.0'
# Now let's ask nicely if that's the right one.
SCREAMING_SNAKE_CASE_ : List[str] = input(f'Which version are you releasing? [{default_version}]' )
if len(lowerCAmelCase ) == 0:
SCREAMING_SNAKE_CASE_ : Optional[int] = default_version
print(f'Updating version to {version}.' )
global_version_update(lowerCAmelCase , patch=lowerCAmelCase )
def _snake_case ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = get_version()
SCREAMING_SNAKE_CASE_ : List[Any] = f'{current_version.major}.{current_version.minor + 1}.0.dev0'
SCREAMING_SNAKE_CASE_ : int = current_version.base_version
# Check with the user we got that right.
SCREAMING_SNAKE_CASE_ : Dict = input(f'Which version are we developing now? [{dev_version}]' )
if len(lowerCAmelCase ) == 0:
SCREAMING_SNAKE_CASE_ : List[Any] = dev_version
print(f'Updating version to {version}.' )
global_version_update(lowerCAmelCase )
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
__lowerCamelCase : Dict = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
__lowerCamelCase : Tuple = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
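# At its core the script above is compiled-regex substitution over a handful of
# files. A standalone demo of the "init" pattern against an in-memory string:
import re

init_pattern = re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE)
source = 'name = "diffusers"\n__version__ = "0.19.0.dev0"'
bumped = init_pattern.sub('__version__ = "0.19.0"', source)
assert bumped.endswith('__version__ = "0.19.0"')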
| 18 | import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
__lowerCamelCase : Any = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def _snake_case ( lowerCAmelCase : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = list(s_dict.keys() )
for key in keys:
SCREAMING_SNAKE_CASE_ : int = R".*/layers_(\d+)"
SCREAMING_SNAKE_CASE_ : List[Any] = key
if re.match(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Any = re.sub(R"layers_(\d+)" , R"block/\1/layer" , lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[Any] = R"(encoder|decoder)\/"
if re.match(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : str = re.match(lowerCAmelCase , lowerCAmelCase ).groups()
if groups[0] == "encoder":
SCREAMING_SNAKE_CASE_ : Any = re.sub(R"/mlp/" , R"/1/mlp/" , lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[Any] = re.sub(R"/pre_mlp_layer_norm/" , R"/1/layer_norm/" , lowerCAmelCase )
elif groups[0] == "decoder":
SCREAMING_SNAKE_CASE_ : List[str] = re.sub(R"/mlp/" , R"/2/mlp/" , lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Dict = re.sub(R"/pre_mlp_layer_norm/" , R"/2/layer_norm/" , lowerCAmelCase )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
SCREAMING_SNAKE_CASE_ : List[Any] = new_key.replace(lowerCAmelCase , lowerCAmelCase )
print(f'{key} -> {new_key}' )
SCREAMING_SNAKE_CASE_ : List[Any] = s_dict.pop(lowerCAmelCase )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
SCREAMING_SNAKE_CASE_ : str = s_dict[
"encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
SCREAMING_SNAKE_CASE_ : Optional[int] = s_dict[
"decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = s_dict[key].shape[0]
SCREAMING_SNAKE_CASE_ : List[Any] = s_dict[key]
for idx in range(lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Tuple = expert_weihts[idx]
print(f'{key} -> {key.replace("expert/" , "nested fstring" )}' )
s_dict.pop(lowerCAmelCase )
return s_dict
__lowerCamelCase : List[Any] = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def _snake_case ( lowerCAmelCase : Dict , lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
import regex as re
with open(lowerCAmelCase , "r" ) as f:
SCREAMING_SNAKE_CASE_ : Optional[Any] = f.read()
SCREAMING_SNAKE_CASE_ : List[str] = re.findall(R"(.*) = ([0-9.]*)" , lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Any = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
SCREAMING_SNAKE_CASE_ : int = float(lowerCAmelCase ) if "." in value else int(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = re.findall(R"(.*activations) = \(\'(.*)\',\)" , lowerCAmelCase )[0]
SCREAMING_SNAKE_CASE_ : List[str] = str(activation[1] )
SCREAMING_SNAKE_CASE_ : str = num_experts
SCREAMING_SNAKE_CASE_ : Tuple = SwitchTransformersConfig(**lowerCAmelCase )
return config
def _snake_case ( lowerCAmelCase : str , lowerCAmelCase : str , lowerCAmelCase : str=None , lowerCAmelCase : Optional[Any]="./" , lowerCAmelCase : Dict=8 ):
"""simple docstring"""
print(f'Loading flax weights from : {flax_checkpoint_path}' )
SCREAMING_SNAKE_CASE_ : int = checkpoints.load_t5x_checkpoint(lowerCAmelCase )
if gin_file is not None:
SCREAMING_SNAKE_CASE_ : int = convert_gin_to_config(lowerCAmelCase , lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE_ : Dict = SwitchTransformersConfig.from_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : str = SwitchTransformersForConditionalGeneration(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = flax_params["target"]
SCREAMING_SNAKE_CASE_ : List[str] = flatten_dict(lowerCAmelCase , sep="/" )
SCREAMING_SNAKE_CASE_ : List[str] = rename_keys(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = unflatten_dict(lowerCAmelCase , sep="/" )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(lowerCAmelCase , lowerCAmelCase )
print(f'Save PyTorch model to {pytorch_dump_path}' )
pt_model.save_pretrained(lowerCAmelCase )
if __name__ == "__main__":
__lowerCamelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'''
''' model architecture. If not provided, a `gin_file` has to be provided.'''
),
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
__lowerCamelCase : Any = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
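# The renaming pass above maps T5X-style parameter paths onto the PyTorch
# layout; the first step turns "layers_<i>" into "block/<i>/layer". A
# standalone demo of that substitution:
import re

key = "encoder/layers_3/attention/query"
renamed = re.sub(r"layers_(\d+)", r"block/\1/layer", key)
assert renamed == "encoder/block/3/layer/attention/query"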
| 18 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class a__ ( unittest.TestCase ):
def __init__( self : Any,_A : List[Any],_A : List[Any]=7,_A : Tuple=3,_A : Optional[Any]=30,_A : Optional[Any]=400,_A : Union[str, Any]=True,_A : Optional[int]=None,_A : str=0.9,_A : str=None,_A : str=True,_A : int=[0.5, 0.5, 0.5],_A : Dict=[0.5, 0.5, 0.5],):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = size if size is not None else {"shortest_edge": 30}
SCREAMING_SNAKE_CASE_ : int = crop_size if crop_size is not None else {"height": 30, "width": 30}
SCREAMING_SNAKE_CASE_ : Optional[Any] = parent
SCREAMING_SNAKE_CASE_ : Any = batch_size
SCREAMING_SNAKE_CASE_ : int = num_channels
SCREAMING_SNAKE_CASE_ : Any = min_resolution
SCREAMING_SNAKE_CASE_ : Union[str, Any] = max_resolution
SCREAMING_SNAKE_CASE_ : int = do_resize_and_center_crop
SCREAMING_SNAKE_CASE_ : Union[str, Any] = size
SCREAMING_SNAKE_CASE_ : List[str] = crop_pct
SCREAMING_SNAKE_CASE_ : str = crop_size
SCREAMING_SNAKE_CASE_ : List[str] = do_normalize
SCREAMING_SNAKE_CASE_ : Dict = image_mean
SCREAMING_SNAKE_CASE_ : Any = image_std
def __UpperCamelCase ( self : int ):
"""simple docstring"""
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class a__ ( A__ , unittest.TestCase ):
A = PoolFormerImageProcessor if is_vision_available() else None
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = PoolFormerImageProcessingTester(self )
@property
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A,"do_resize_and_center_crop" ) )
self.assertTrue(hasattr(_A,"size" ) )
self.assertTrue(hasattr(_A,"crop_pct" ) )
self.assertTrue(hasattr(_A,"do_normalize" ) )
self.assertTrue(hasattr(_A,"image_mean" ) )
self.assertTrue(hasattr(_A,"image_std" ) )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size,{"shortest_edge": 30} )
self.assertEqual(image_processor.crop_size,{"height": 30, "width": 30} )
SCREAMING_SNAKE_CASE_ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict,size=42,crop_size=84 )
self.assertEqual(image_processor.size,{"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size,{"height": 84, "width": 84} )
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
pass
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester,equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A,Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE_ : Optional[Any] = image_processing(image_inputs[0],return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
),)
# Test batched
SCREAMING_SNAKE_CASE_ : Union[str, Any] = image_processing(_A,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
),)
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester,equal_resolution=_A,numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A,np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE_ : List[Any] = image_processing(image_inputs[0],return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
),)
# Test batched
SCREAMING_SNAKE_CASE_ : Dict = image_processing(_A,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
),)
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE_ : Dict = prepare_image_inputs(self.image_processor_tester,equal_resolution=_A,torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A,torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE_ : List[str] = image_processing(image_inputs[0],return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
),)
# Test batched
SCREAMING_SNAKE_CASE_ : Dict = image_processing(_A,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
),)
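# A note on the three pixel-value tests above: whatever the input type (PIL,
# NumPy, or torch), the processor must emit a fixed center-cropped shape. A
# minimal, self-contained sketch of that contract (hypothetical helper, not
# part of the test suite):
def expected_pixel_values_shape(batch_size: int, num_channels: int, crop_size: dict) -> tuple:
    # Center-cropped outputs always have the configured crop height/width.
    return (batch_size, num_channels, crop_size["height"], crop_size["width"])

assert expected_pixel_values_shape(1, 3, {"height": 30, "width": 30}) == (1, 3, 30, 30)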
| 18 | from math import factorial, radians
def maclaurin_sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    """Approximate sin(angle_in_degrees) with a truncated Maclaurin series."""
    # Wrap the angle into [0, 360) so the series stays in its fast-convergence range
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result, rounded_values_count)
if __name__ == "__main__":
__import__('''doctest''').testmod()
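# A quick numerical check of the truncated series against math.sin (a sketch,
# assuming the fixed maclaurin_sin above; 18 terms leave error far below the
# 10-decimal rounding):
def _check_maclaurin() -> None:
    from math import sin
    for degrees in (0, 30.0, 90.0, 180.0, 275.5):
        assert abs(maclaurin_sin(degrees) - sin(radians(degrees))) < 1e-9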
| 18 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_chinese_clip''': [
'''CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ChineseCLIPConfig''',
'''ChineseCLIPOnnxConfig''',
'''ChineseCLIPTextConfig''',
'''ChineseCLIPVisionConfig''',
],
'''processing_chinese_clip''': ['''ChineseCLIPProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor"]
    _import_structure["image_processing_chinese_clip"] = ["ChineseCLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_chinese_clip"] = [
'''CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ChineseCLIPModel''',
'''ChineseCLIPPreTrainedModel''',
'''ChineseCLIPTextModel''',
'''ChineseCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
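# The module above registers a _LazyModule so heavy submodules load only on
# first attribute access. A minimal illustration of the same idea (hypothetical
# names, not the transformers implementation):
import importlib
import types

class LazyAttrModule(types.ModuleType):
    def __init__(self, name: str, attr_to_module: dict):
        super().__init__(name)
        self._attr_to_module = attr_to_module

    def __getattr__(self, attr: str):
        # Import the owning submodule lazily, then resolve the attribute on it.
        submodule = importlib.import_module(self._attr_to_module[attr])
        return getattr(submodule, attr)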
| 18 | from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
    """Recursive factorial, memoized with functools.lru_cache."""
    if num < 0:
        raise ValueError("Number should not be negative.")
    return 1 if num in (0, 1) else num * factorial(num - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
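# Because of @lru_cache, computing factorial(10) also caches every smaller
# result reached by the recursion; a quick demonstration (sketch):
def _demo_factorial_cache() -> None:
    factorial.cache_clear()
    assert factorial(10) == 3628800
    assert factorial.cache_info().currsize >= 10  # results for 1..10 are cached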
| 18 | 1 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class a__ ( unittest.TestCase ):
def __init__( self : Any,_A : int,_A : Tuple=7,_A : Tuple=3,_A : int=30,_A : str=400,_A : Any=True,_A : Optional[Any]=None,_A : Optional[Any]=True,_A : Dict=[0.5, 0.5, 0.5],_A : Dict=[0.5, 0.5, 0.5],_A : str=True,_A : Optional[int]=1 / 255,_A : Dict=True,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
SCREAMING_SNAKE_CASE_ : int = parent
SCREAMING_SNAKE_CASE_ : Dict = batch_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = num_channels
SCREAMING_SNAKE_CASE_ : Dict = min_resolution
SCREAMING_SNAKE_CASE_ : List[str] = max_resolution
SCREAMING_SNAKE_CASE_ : List[str] = do_resize
SCREAMING_SNAKE_CASE_ : Union[str, Any] = size
SCREAMING_SNAKE_CASE_ : Dict = do_normalize
SCREAMING_SNAKE_CASE_ : Any = image_mean
SCREAMING_SNAKE_CASE_ : Any = image_std
SCREAMING_SNAKE_CASE_ : Optional[Any] = do_rescale
SCREAMING_SNAKE_CASE_ : str = rescale_factor
SCREAMING_SNAKE_CASE_ : Optional[Any] = do_pad
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def __UpperCamelCase ( self : Union[str, Any],_A : List[str],_A : Tuple=False ):
"""simple docstring"""
if not batched:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = image_inputs[0]
if isinstance(_A,Image.Image ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = image.size
else:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = image.shape[1], image.shape[2]
if w < h:
SCREAMING_SNAKE_CASE_ : Tuple = int(self.size["shortest_edge"] * h / w )
SCREAMING_SNAKE_CASE_ : int = self.size["shortest_edge"]
elif w > h:
SCREAMING_SNAKE_CASE_ : int = self.size["shortest_edge"]
SCREAMING_SNAKE_CASE_ : int = int(self.size["shortest_edge"] * w / h )
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.size["shortest_edge"]
SCREAMING_SNAKE_CASE_ : Optional[int] = self.size["shortest_edge"]
else:
SCREAMING_SNAKE_CASE_ : List[Any] = []
for image in image_inputs:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
SCREAMING_SNAKE_CASE_ : Tuple = max(_A,key=lambda _A : item[0] )[0]
SCREAMING_SNAKE_CASE_ : Dict = max(_A,key=lambda _A : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class a__ ( A__ , unittest.TestCase ):
A = DetaImageProcessor if is_vision_available() else None
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = DetaImageProcessingTester(self )
@property
def __UpperCamelCase ( self : str ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A,"image_mean" ) )
self.assertTrue(hasattr(_A,"image_std" ) )
self.assertTrue(hasattr(_A,"do_normalize" ) )
self.assertTrue(hasattr(_A,"do_resize" ) )
self.assertTrue(hasattr(_A,"do_rescale" ) )
self.assertTrue(hasattr(_A,"do_pad" ) )
self.assertTrue(hasattr(_A,"size" ) )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size,{"shortest_edge": 18, "longest_edge": 1333} )
self.assertEqual(image_processor.do_pad,_A )
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
pass
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE_ : List[str] = prepare_image_inputs(self.image_processor_tester,equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A,Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE_ : List[Any] = image_processing(image_inputs[0],return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape,(1, self.image_processor_tester.num_channels, expected_height, expected_width),)
# Test batched
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = self.image_processor_tester.get_expected_values(_A,batched=_A )
SCREAMING_SNAKE_CASE_ : Tuple = image_processing(_A,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
),)
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE_ : Any = prepare_image_inputs(self.image_processor_tester,equal_resolution=_A,numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A,np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE_ : Any = image_processing(image_inputs[0],return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape,(1, self.image_processor_tester.num_channels, expected_height, expected_width),)
# Test batched
SCREAMING_SNAKE_CASE_ : Optional[Any] = image_processing(_A,return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = self.image_processor_tester.get_expected_values(_A,batched=_A )
self.assertEqual(
encoded_images.shape,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
),)
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester,equal_resolution=_A,torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A,torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE_ : Tuple = image_processing(image_inputs[0],return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape,(1, self.image_processor_tester.num_channels, expected_height, expected_width),)
# Test batched
SCREAMING_SNAKE_CASE_ : int = image_processing(_A,return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = self.image_processor_tester.get_expected_values(_A,batched=_A )
self.assertEqual(
encoded_images.shape,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
),)
@slow
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt","r" ) as f:
SCREAMING_SNAKE_CASE_ : Any = json.loads(f.read() )
SCREAMING_SNAKE_CASE_ : Optional[int] = {"image_id": 3_9769, "annotations": target}
# encode them
SCREAMING_SNAKE_CASE_ : Union[str, Any] = DetaImageProcessor()
SCREAMING_SNAKE_CASE_ : Dict = image_processing(images=_A,annotations=_A,return_tensors="pt" )
# verify pixel values
SCREAMING_SNAKE_CASE_ : Tuple = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape,_A )
SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3],_A,atol=1E-4 ) )
# verify area
SCREAMING_SNAKE_CASE_ : int = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"],_A ) )
# verify boxes
SCREAMING_SNAKE_CASE_ : List[Any] = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape,_A )
SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0],_A,atol=1E-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"],_A ) )
# verify is_crowd
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"],_A ) )
# verify class_labels
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"],_A ) )
# verify orig_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"],_A ) )
# verify size
SCREAMING_SNAKE_CASE_ : str = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"],_A ) )
@slow
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt","r" ) as f:
SCREAMING_SNAKE_CASE_ : str = json.loads(f.read() )
SCREAMING_SNAKE_CASE_ : Optional[Any] = {"file_name": "000000039769.png", "image_id": 3_9769, "segments_info": target}
SCREAMING_SNAKE_CASE_ : Optional[int] = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
SCREAMING_SNAKE_CASE_ : Union[str, Any] = DetaImageProcessor(format="coco_panoptic" )
SCREAMING_SNAKE_CASE_ : Any = image_processing(images=_A,annotations=_A,masks_path=_A,return_tensors="pt" )
# verify pixel values
SCREAMING_SNAKE_CASE_ : Any = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape,_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3],_A,atol=1E-4 ) )
# verify area
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"],_A ) )
# verify boxes
SCREAMING_SNAKE_CASE_ : Dict = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape,_A )
SCREAMING_SNAKE_CASE_ : str = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0],_A,atol=1E-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE_ : Dict = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"],_A ) )
# verify is_crowd
SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"],_A ) )
# verify class_labels
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"],_A ) )
# verify masks
SCREAMING_SNAKE_CASE_ : str = 82_2873
self.assertEqual(encoding["labels"][0]["masks"].sum().item(),_A )
# verify orig_size
SCREAMING_SNAKE_CASE_ : str = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"],_A ) )
# verify size
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"],_A ) )
| 18 | from collections import defaultdict
def dfs(start: int) -> int:
    """Return the size of the subtree rooted at ``start``, recording even-sized cuts."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree():
    """Count the maximum number of removable edges via one DFS from the root."""
    dfs(1)
if __name__ == "__main__":
    number_of_nodes, number_of_edges = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
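# A self-contained variant of the Even Tree count above for an arbitrary
# adjacency list (sketch; assumes an even total node count, as in the problem
# statement): every even-sized subtree contributes one removable edge, and the
# root's own "cut" is subtracted because no edge sits above it.
def count_even_cuts(adjacency: dict, root: int = 1) -> int:
    seen = set()
    even_subtrees = 0

    def subtree_size(u: int) -> int:
        nonlocal even_subtrees
        seen.add(u)
        size = 1
        for v in adjacency[u]:
            if v not in seen:
                size += subtree_size(v)
        if size % 2 == 0:
            even_subtrees += 1
        return size

    subtree_size(root)
    return even_subtrees - 1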
| 18 | 1 |
from statistics import mean, stdev
def normalization(data: list, ndigits: int = 3) -> list:
    """Rescale ``data`` into [0, 1] via min-max normalization."""
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    """Rescale ``data`` to zero mean and unit sample standard deviation."""
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / sigma, ndigits) for x in data]
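# Usage sketch: min-max normalization maps values into [0, 1], while z-score
# standardization centers them at mean 0 with unit sample standard deviation.
if __name__ == "__main__":
    sample = [2.0, 4.0, 6.0, 8.0]
    print(normalization(sample))    # [0.0, 0.333, 0.667, 1.0]
    print(standardization(sample))  # ~[-1.162, -0.387, 0.387, 1.162]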
| 18 | # Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")
    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)
    # Let's go
    args = parser.parse_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    # Run
    args.func(args)
if __name__ == "__main__":
main()
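# Dispatch works because each *_command_parser registers a sub-command and
# attaches its handler via set_defaults(func=...). A minimal standalone sketch
# of the same argparse pattern:
def _demo_dispatch(argv: list) -> None:
    demo_parser = ArgumentParser("demo")
    demo_sub = demo_parser.add_subparsers()
    hello = demo_sub.add_parser("hello")
    hello.set_defaults(func=lambda args: print("hello from sub-command"))
    args = demo_parser.parse_args(argv)
    args.func(args)  # _demo_dispatch(["hello"]) prints the greeting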
| 18 | 1 |
import os
def solution() -> str:
    """Return the first ten digits of the sum of the numbers in num.txt."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
if __name__ == "__main__":
print(solution())
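# Why the string slice works: Python integers are arbitrary precision, so the
# sum is exact and the first ten characters are the leading digits (sketch):
def _first_ten_digits_of_sum(numbers) -> str:
    return str(sum(int(n) for n in numbers))[:10]

assert _first_ten_digits_of_sum(["1" * 50, "2" * 50]) == "3333333333"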
| 18 | import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
__lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
__lowerCamelCase : Dict = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__lowerCamelCase : int = {
'''vocab_file''': {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'''
),
}
}
__lowerCamelCase : Any = {
'''junnyu/roformer_chinese_small''': 15_36,
'''junnyu/roformer_chinese_base''': 15_36,
'''junnyu/roformer_chinese_char_small''': 5_12,
'''junnyu/roformer_chinese_char_base''': 5_12,
'''junnyu/roformer_small_discriminator''': 1_28,
'''junnyu/roformer_small_generator''': 1_28,
}
__lowerCamelCase : Union[str, Any] = {
'''junnyu/roformer_chinese_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_base''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_base''': {'''do_lower_case''': True},
'''junnyu/roformer_small_discriminator''': {'''do_lower_case''': True},
'''junnyu/roformer_small_generator''': {'''do_lower_case''': True},
}
class a__ ( A__ ):
A = VOCAB_FILES_NAMES
A = PRETRAINED_VOCAB_FILES_MAP
A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A = PRETRAINED_INIT_CONFIGURATION
A = RoFormerTokenizer
def __init__( self : List[str],_A : int=None,_A : int=None,_A : int=True,_A : List[Any]="[UNK]",_A : Tuple="[SEP]",_A : List[Any]="[PAD]",_A : Optional[int]="[CLS]",_A : Optional[Any]="[MASK]",_A : Optional[int]=True,_A : List[str]=None,**_A : List[Any],):
"""simple docstring"""
super().__init__(
_A,tokenizer_file=_A,do_lower_case=_A,unk_token=_A,sep_token=_A,pad_token=_A,cls_token=_A,mask_token=_A,tokenize_chinese_chars=_A,strip_accents=_A,**_A,)
SCREAMING_SNAKE_CASE_ : Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
pre_tok_state.get("lowercase",_A ) != do_lower_case
or pre_tok_state.get("strip_accents",_A ) != strip_accents
):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = getattr(_A,pre_tok_state.pop("type" ) )
SCREAMING_SNAKE_CASE_ : Any = do_lower_case
SCREAMING_SNAKE_CASE_ : List[str] = strip_accents
SCREAMING_SNAKE_CASE_ : str = pre_tok_class(**_A )
SCREAMING_SNAKE_CASE_ : List[str] = do_lower_case
def __getstate__( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = self.__dict__.copy()
SCREAMING_SNAKE_CASE_ : Optional[Any] = BertPreTokenizer()
return state
def __setstate__( self : List[Any],_A : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = d
SCREAMING_SNAKE_CASE_ : List[str] = self.__dict__["_tokenizer"].get_vocab()
SCREAMING_SNAKE_CASE_ : Any = PreTokenizer.custom(JiebaPreTokenizer(_A ) )
    def __UpperCamelCase ( self : Union[str, Any],token_ids_0 : List[int],token_ids_1 : Optional[List[int]] = None ):
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def __UpperCamelCase ( self : str,token_ids_0 : List[int],token_ids_1 : Optional[List[int]] = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
def __UpperCamelCase ( self : int,_A : str,_A : Optional[str] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self._tokenizer.model.save(_A,name=_A )
return tuple(_A )
def __UpperCamelCase ( self : int,_A : Optional[int],_A : List[Any]=None,_A : Tuple=None,_A : str=False,**_A : List[Any],):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = BertPreTokenizer()
return super().save_pretrained(_A,_A,_A,_A,**_A )
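# The two sequence-pair methods above follow the BERT layout
# [CLS] A [SEP] B [SEP], with token_type_ids 0 over the first segment and 1
# over the second. A standalone sketch of that contract (hypothetical ids):
def build_pair(ids_a: list, ids_b: list, cls_id: int = 101, sep_id: int = 102) -> tuple:
    input_ids = [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]
    token_type_ids = [0] * (len(ids_a) + 2) + [1] * (len(ids_b) + 1)
    return input_ids, token_type_ids

assert build_pair([7, 8], [9]) == ([101, 7, 8, 102, 9, 102], [0, 0, 0, 0, 1, 1])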
| 18 | 1 |
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class a__ ( unittest.TestCase ):
A = inspect.getfile(accelerate.test_utils )
A = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_cli.py'] )
A = ['accelerate', 'launch']
A = Path.home() / '.cache/huggingface/accelerate'
A = 'default_config.yaml'
A = config_folder / config_file
A = config_folder / '_default_config.yaml'
A = Path('tests/test_configs' )
@classmethod
def __UpperCamelCase ( cls : str ):
"""simple docstring"""
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
def __UpperCamelCase ( cls : Any ):
"""simple docstring"""
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path],env=os.environ.copy() )
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
for config in sorted(self.test_config_path.glob("**/*.yaml" ) ):
with self.subTest(config_file=_A ):
execute_subprocess_async(
self.base_cmd + ["--config_file", str(_A ), self.test_file_path],env=os.environ.copy() )
def __UpperCamelCase ( self : int ):
"""simple docstring"""
execute_subprocess_async(["accelerate", "test"],env=os.environ.copy() )
class a__ ( unittest.TestCase ):
A = 'test-tpu'
A = 'us-central1-a'
A = 'ls'
A = ['accelerate', 'tpu-config']
A = 'cd /usr/share'
A = 'tests/test_samples/test_command_file.sh'
A = 'Running gcloud compute tpus tpu-vm ssh'
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = run_command(
self.cmd
+ ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"],return_stdout=_A,)
self.assertIn(
F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all',_A,)
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/0_12_0.yaml",
"--command",
self.command,
"--tpu_zone",
self.tpu_zone,
"--tpu_name",
self.tpu_name,
"--debug",
],return_stdout=_A,)
self.assertIn(
F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all',_A,)
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = run_command(
self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"],return_stdout=_A )
self.assertIn(
F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',_A,)
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = run_command(
self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"],return_stdout=_A,)
self.assertIn(
F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all',_A,)
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/latest.yaml",
"--command",
self.command,
"--command",
"echo \"Hello World\"",
"--debug",
],return_stdout=_A,)
self.assertIn(
F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all',_A,)
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = run_command(
self.cmd
+ ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"],return_stdout=_A,)
self.assertIn(
F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',_A,)
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/0_12_0.yaml",
"--command_file",
self.command_file,
"--tpu_zone",
self.tpu_zone,
"--tpu_name",
self.tpu_name,
"--debug",
],return_stdout=_A,)
self.assertIn(
F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',_A,)
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = run_command(
self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"],return_stdout=_A,)
self.assertIn(
F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all',_A,)
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/latest.yaml",
"--install_accelerate",
"--accelerate_version",
"12.0.0",
"--debug",
],return_stdout=_A,)
self.assertIn(
F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all',_A,)
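# The YAML sweep above relies on unittest's subTest so one failing config file
# does not abort the remaining cases; a minimal illustration of that idiom:
import unittest

class _SubTestDemo(unittest.TestCase):
    def test_many_configs(self):
        for config_file in ("a.yaml", "b.yaml"):
            with self.subTest(config_file=config_file):
                self.assertTrue(config_file.endswith(".yaml"))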
| 18 | import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class a__ ( A__ ):
def __init__( self : Tuple,_A : Optional[int],_A : Any=13,_A : List[str]=7,_A : int=True,_A : Dict=True,_A : Dict=False,_A : List[Any]=True,_A : Any=99,_A : Optional[int]=32,_A : Any=5,_A : List[Any]=4,_A : Dict=64,_A : Optional[Any]="gelu",_A : Tuple=0.1,_A : Any=0.1,_A : List[Any]=512,_A : Dict=16,_A : Optional[Any]=2,_A : Union[str, Any]=0.02,_A : List[str]=3,_A : Optional[Any]=4,_A : Union[str, Any]=None,_A : Tuple=2,_A : List[str]=2,_A : str=2,_A : Dict=2,_A : Optional[Any]=4,_A : Union[str, Any]=1,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = parent
SCREAMING_SNAKE_CASE_ : Optional[int] = batch_size
SCREAMING_SNAKE_CASE_ : Dict = seq_length
SCREAMING_SNAKE_CASE_ : Dict = is_training
SCREAMING_SNAKE_CASE_ : Optional[int] = use_input_mask
SCREAMING_SNAKE_CASE_ : int = use_token_type_ids
SCREAMING_SNAKE_CASE_ : Optional[int] = use_labels
SCREAMING_SNAKE_CASE_ : Tuple = vocab_size
SCREAMING_SNAKE_CASE_ : Any = hidden_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE_ : List[Any] = intermediate_size
SCREAMING_SNAKE_CASE_ : List[str] = hidden_act
SCREAMING_SNAKE_CASE_ : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : Optional[int] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : str = max_position_embeddings
SCREAMING_SNAKE_CASE_ : str = type_vocab_size
SCREAMING_SNAKE_CASE_ : List[str] = type_sequence_label_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE_ : Tuple = num_labels
SCREAMING_SNAKE_CASE_ : List[Any] = num_choices
SCREAMING_SNAKE_CASE_ : Dict = scope
SCREAMING_SNAKE_CASE_ : int = q_groups
SCREAMING_SNAKE_CASE_ : Tuple = k_groups
SCREAMING_SNAKE_CASE_ : List[Any] = v_groups
SCREAMING_SNAKE_CASE_ : Tuple = post_attention_groups
SCREAMING_SNAKE_CASE_ : int = intermediate_groups
SCREAMING_SNAKE_CASE_ : List[Any] = output_groups
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
SCREAMING_SNAKE_CASE_ : List[Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ : Optional[Any] = None
SCREAMING_SNAKE_CASE_ : Any = None
SCREAMING_SNAKE_CASE_ : str = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : str = ids_tensor([self.batch_size],self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length],self.num_labels )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([self.batch_size],self.num_choices )
SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase ( self : str ):
"""simple docstring"""
return SqueezeBertConfig(
embedding_size=self.hidden_size,vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,attention_probs_dropout_prob=self.hidden_dropout_prob,attention_dropout=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,initializer_range=self.initializer_range,q_groups=self.q_groups,k_groups=self.k_groups,v_groups=self.v_groups,post_attention_groups=self.post_attention_groups,intermediate_groups=self.intermediate_groups,output_groups=self.output_groups,)
def __UpperCamelCase ( self : Tuple,_A : Union[str, Any],_A : Union[str, Any],_A : int,_A : Optional[int],_A : Union[str, Any],_A : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = SqueezeBertModel(config=_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : Any = model(_A,_A )
SCREAMING_SNAKE_CASE_ : List[str] = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self : Dict,_A : Any,_A : Tuple,_A : str,_A : Any,_A : Union[str, Any],_A : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = SqueezeBertForMaskedLM(config=_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : List[str] = model(_A,attention_mask=_A,labels=_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self : Optional[int],_A : Union[str, Any],_A : Union[str, Any],_A : Any,_A : Tuple,_A : int,_A : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = SqueezeBertForQuestionAnswering(config=_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = model(
_A,attention_mask=_A,start_positions=_A,end_positions=_A )
self.parent.assertEqual(result.start_logits.shape,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape,(self.batch_size, self.seq_length) )
def __UpperCamelCase ( self : List[Any],_A : List[str],_A : Tuple,_A : List[Any],_A : List[str],_A : List[str],_A : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.num_labels
SCREAMING_SNAKE_CASE_ : List[str] = SqueezeBertForSequenceClassification(_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = model(_A,attention_mask=_A,labels=_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : str,_A : Optional[int],_A : str,_A : List[Any],_A : List[str],_A : str,_A : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.num_labels
SCREAMING_SNAKE_CASE_ : Optional[int] = SqueezeBertForTokenClassification(config=_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : Optional[int] = model(_A,attention_mask=_A,labels=_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase ( self : List[Any],_A : Tuple,_A : str,_A : Optional[Any],_A : int,_A : str,_A : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self.num_choices
SCREAMING_SNAKE_CASE_ : Union[str, Any] = SqueezeBertForMultipleChoice(config=_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = input_ids.unsqueeze(1 ).expand(-1,self.num_choices,-1 ).contiguous()
SCREAMING_SNAKE_CASE_ : str = input_mask.unsqueeze(1 ).expand(-1,self.num_choices,-1 ).contiguous()
SCREAMING_SNAKE_CASE_ : Optional[int] = model(
_A,attention_mask=_A,labels=_A,)
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_choices) )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.prepare_config_and_inputs()
((SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_)) : Dict = config_and_inputs
SCREAMING_SNAKE_CASE_ : Dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class a__ ( A__ , A__ , unittest.TestCase ):
A = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
A = (
{
'feature-extraction': SqueezeBertModel,
'fill-mask': SqueezeBertForMaskedLM,
'question-answering': SqueezeBertForQuestionAnswering,
'text-classification': SqueezeBertForSequenceClassification,
'token-classification': SqueezeBertForTokenClassification,
'zero-shot': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
A = False
A = True
A = False
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = SqueezeBertModelTester(self )
SCREAMING_SNAKE_CASE_ : List[str] = ConfigTester(self,config_class=_A,dim=37 )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*_A )
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*_A )
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*_A )
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*_A )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*_A )
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*_A )
@slow
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ : Tuple = SqueezeBertModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@require_sentencepiece
@require_tokenizers
@require_torch
class a__ ( unittest.TestCase ):
@slow
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli" )
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.tensor([[1, 2_9414, 232, 328, 740, 1140, 1_2695, 69, 13, 1588, 2]] )
SCREAMING_SNAKE_CASE_ : List[Any] = model(_A )[0]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.Size((1, 3) )
self.assertEqual(output.shape,_A )
SCREAMING_SNAKE_CASE_ : int = torch.tensor([[0.6401, -0.0349, -0.6041]] )
self.assertTrue(torch.allclose(_A,_A,atol=1E-4 ) )
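# Each head test above reduces to one logits-shape contract; a compact
# reference of the expected shapes (sketch):
EXPECTED_LOGITS_SHAPES = {
    "masked-lm": ("batch_size", "seq_length", "vocab_size"),
    "sequence-classification": ("batch_size", "num_labels"),
    "token-classification": ("batch_size", "seq_length", "num_labels"),
    "multiple-choice": ("batch_size", "num_choices"),
    "question-answering": ("batch_size", "seq_length"),  # start and end logits each
}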
| 18 | 1 |
from __future__ import annotations
__author__ = "Muhammad Umer Farooq"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Muhammad Umer Farooq"
__email__ = "[email protected]"
__status__ = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser):
    def __init__(self, domain: str):
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]):
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)


def get_domain_name(url: str) -> str:
    """Return the main domain name, e.g. 'github.com'."""
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


def get_sub_domain_name(url: str) -> str:
    """Return the full network location, e.g. 'sub.github.com'."""
    return parse.urlparse(url).netloc


def emails_from_url(url: str = "https://github.com") -> list[str]:
    """Collect email addresses found on ``url`` and on the pages it links to."""
    domain = get_domain_name(url)
    # Initialize the parser
    parser = Parser(domain)
    try:
        # Open URL
        r = requests.get(url)
        # pass the raw HTML to the parser to get links
        parser.feed(r.text)
        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)
    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)
if __name__ == "__main__":
    emails = emails_from_url("https://github.com")
print(f'''{len(emails)} emails found:''')
print('''\n'''.join(sorted(emails)))
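# Note: the pattern "[a-zA-Z0-9]+@" + domain only matches addresses on the
# crawled page's own domain; mail on other hosts is skipped by design. A quick
# check of the regex (sketch):
assert re.findall("[a-zA-Z0-9]+@" + "github.com", "ping [email protected] now") == ["[email protected]"]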
| 18 | import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _snake_case ( lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
assert isinstance(lowerCAmelCase , lowerCAmelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def _snake_case ( lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE_ : List[str] = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase , keep_in_memory=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def _snake_case ( lowerCAmelCase : Tuple , lowerCAmelCase : Dict , lowerCAmelCase : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Optional[Any] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : Any = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE_ : List[str] = (
Features({feature: Value(lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE_ : Optional[Any] = TextDatasetReader(lowerCAmelCase , features=lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def _snake_case ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : List[str] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : List[str] = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase , split=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def _snake_case ( lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : Optional[int] ):
"""simple docstring"""
if issubclass(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = text_path
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [text_path]
SCREAMING_SNAKE_CASE_ : int = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Optional[int] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : List[str] = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
def _snake_case ( lowerCAmelCase : Dict , lowerCAmelCase : Dict , lowerCAmelCase : List[str]=("train",) ):
"""simple docstring"""
assert isinstance(lowerCAmelCase , lowerCAmelCase )
for split in splits:
SCREAMING_SNAKE_CASE_ : int = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def _snake_case ( lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE_ : List[Any] = TextDatasetReader({"train": text_path} , cache_dir=lowerCAmelCase , keep_in_memory=lowerCAmelCase ).read()
_check_text_datasetdict(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def _snake_case ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = tmp_path / "cache"
    # The text reader exposes a single "text" column whose default dtype is "string"
SCREAMING_SNAKE_CASE_ : Tuple = {"text": "string"}
SCREAMING_SNAKE_CASE_ : Any = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE_ : Dict = (
Features({feature: Value(lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE_ : str = TextDatasetReader({"train": text_path} , features=lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_datasetdict(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def _snake_case ( lowerCAmelCase : Optional[Any] , lowerCAmelCase : Any , lowerCAmelCase : Dict ):
"""simple docstring"""
if split:
SCREAMING_SNAKE_CASE_ : Optional[int] = {split: text_path}
else:
SCREAMING_SNAKE_CASE_ : List[Any] = "train"
SCREAMING_SNAKE_CASE_ : Tuple = {"train": text_path, "test": text_path}
SCREAMING_SNAKE_CASE_ : Any = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : List[str] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : str = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_datasetdict(lowerCAmelCase , lowerCAmelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
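# The split fixture above builds either {split: path} or the default
# {"train": path, "test": path}; the reader then yields one dataset per key.
# A minimal sketch of that mapping logic:
def resolve_split_paths(text_path: str, split) -> dict:
    if split:
        return {split: text_path}
    return {"train": text_path, "test": text_path}

assert resolve_split_paths("data.txt", "test") == {"test": "data.txt"}
assert sorted(resolve_split_paths("data.txt", None)) == ["test", "train"]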
| 18 | 1 |
values = {
0: '''0''',
1: '''1''',
2: '''2''',
3: '''3''',
4: '''4''',
5: '''5''',
6: '''6''',
7: '''7''',
8: '''8''',
9: '''9''',
10: '''a''',
11: '''b''',
12: '''c''',
13: '''d''',
14: '''e''',
15: '''f''',
}
def decimal_to_hexadecimal(decimal: float) -> str:
    """Convert an integer-valued decimal to a hexadecimal string, e.g. -256 -> '-0x100'."""
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod()
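# Examples for decimal_to_hexadecimal above (sketch): output matches Python's
# built-in hex() for integers, including the sign handling.
assert decimal_to_hexadecimal(5) == hex(5) == "0x5"
assert decimal_to_hexadecimal(-256) == hex(-256) == "-0x100"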
| 18 | import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def _snake_case ( lowerCAmelCase : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = [tensor.shape for tensor in tensor_list]
return all(shape == shapes[0] for shape in shapes[1:] )
class a__ ( A__ , A__ , A__ , unittest.TestCase ):
A = StableDiffusionLatentUpscalePipeline
A = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'height',
'width',
'cross_attention_kwargs',
'negative_prompt_embeds',
'prompt_embeds',
}
A = PipelineTesterMixin.required_optional_params - {'num_images_per_prompt'}
A = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
A = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
A = frozenset([] )
A = True
@property
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = 1
SCREAMING_SNAKE_CASE_ : Optional[int] = 4
SCREAMING_SNAKE_CASE_ : Optional[int] = (16, 16)
SCREAMING_SNAKE_CASE_ : Dict = floats_tensor((batch_size, num_channels) + sizes,rng=random.Random(0 ) ).to(_A )
return image
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : str = UNetaDConditionModel(
act_fn="gelu",attention_head_dim=8,norm_num_groups=_A,block_out_channels=[32, 32, 64, 64],time_cond_proj_dim=160,conv_in_kernel=1,conv_out_kernel=1,cross_attention_dim=32,down_block_types=(
"KDownBlock2D",
"KCrossAttnDownBlock2D",
"KCrossAttnDownBlock2D",
"KCrossAttnDownBlock2D",
),in_channels=8,mid_block_type=_A,only_cross_attention=_A,out_channels=5,resnet_time_scale_shift="scale_shift",time_embedding_type="fourier",timestep_post_act="gelu",up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"),)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 32, 64, 64],in_channels=3,out_channels=3,down_block_types=[
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
],up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],latent_channels=4,)
SCREAMING_SNAKE_CASE_ : int = EulerDiscreteScheduler(prediction_type="sample" )
SCREAMING_SNAKE_CASE_ : List[Any] = CLIPTextConfig(
bos_token_id=0,eos_token_id=2,hidden_size=32,intermediate_size=37,layer_norm_eps=1E-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=1000,hidden_act="quick_gelu",projection_dim=512,)
SCREAMING_SNAKE_CASE_ : Tuple = CLIPTextModel(_A )
SCREAMING_SNAKE_CASE_ : int = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {
"unet": model.eval(),
"vae": vae.eval(),
"scheduler": scheduler,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def __UpperCamelCase ( self : List[Any],_A : int,_A : Tuple=0 ):
"""simple docstring"""
if str(_A ).startswith("mps" ):
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.manual_seed(_A )
else:
SCREAMING_SNAKE_CASE_ : Dict = torch.Generator(device=_A ).manual_seed(_A )
SCREAMING_SNAKE_CASE_ : Tuple = {
"prompt": "A painting of a squirrel eating a burger",
"image": self.dummy_image.cpu(),
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = "cpu"
SCREAMING_SNAKE_CASE_ : List[str] = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : List[str] = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
SCREAMING_SNAKE_CASE_ : Tuple = self.get_dummy_inputs(_A )
SCREAMING_SNAKE_CASE_ : Dict = pipe(**_A ).images
SCREAMING_SNAKE_CASE_ : Optional[int] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape,(1, 256, 256, 3) )
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.array(
[0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055] )
SCREAMING_SNAKE_CASE_ : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_A,1E-3 )
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
super().test_attention_slicing_forward_pass(expected_max_diff=7E-3 )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
super().test_cpu_offload_forward_pass(expected_max_diff=3E-3 )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def __UpperCamelCase ( self : int ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=7E-3 )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3E-3 )
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
super().test_save_load_local(expected_max_difference=3E-3 )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
super().test_save_load_optional_components(expected_max_difference=3E-3 )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = [
"DDIMScheduler",
"DDPMScheduler",
"PNDMScheduler",
"HeunDiscreteScheduler",
"EulerAncestralDiscreteScheduler",
"KDPM2DiscreteScheduler",
"KDPM2AncestralDiscreteScheduler",
"DPMSolverSDEScheduler",
]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : Any = self.pipeline_class(**_A )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
SCREAMING_SNAKE_CASE_ : Any = self.get_dummy_inputs(_A )
SCREAMING_SNAKE_CASE_ : Optional[Any] = 2
SCREAMING_SNAKE_CASE_ : Optional[int] = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
# no sigma schedulers are not supported
# no schedulers
continue
SCREAMING_SNAKE_CASE_ : Tuple = getattr(_A,scheduler_enum.name )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = scheduler_cls.from_config(pipe.scheduler.config )
SCREAMING_SNAKE_CASE_ : Optional[int] = pipe(**_A )[0]
outputs.append(_A )
assert check_same_shape(_A )
@require_torch_gpu
@slow
class a__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = torch.manual_seed(33 )
SCREAMING_SNAKE_CASE_ : Optional[int] = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4",torch_dtype=torch.floataa )
pipe.to("cuda" )
SCREAMING_SNAKE_CASE_ : List[Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
"stabilityai/sd-x2-latent-upscaler",torch_dtype=torch.floataa )
upscaler.to("cuda" )
SCREAMING_SNAKE_CASE_ : Tuple = "a photo of an astronaut high resolution, unreal engine, ultra realistic"
SCREAMING_SNAKE_CASE_ : str = pipe(_A,generator=_A,output_type="latent" ).images
SCREAMING_SNAKE_CASE_ : Optional[Any] = upscaler(
prompt=_A,image=_A,num_inference_steps=20,guidance_scale=0,generator=_A,output_type="np",).images[0]
SCREAMING_SNAKE_CASE_ : str = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy" )
assert np.abs((expected_image - image).mean() ) < 5E-2
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = torch.manual_seed(33 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
"stabilityai/sd-x2-latent-upscaler",torch_dtype=torch.floataa )
upscaler.to("cuda" )
SCREAMING_SNAKE_CASE_ : Any = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"
SCREAMING_SNAKE_CASE_ : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png" )
SCREAMING_SNAKE_CASE_ : str = upscaler(
prompt=_A,image=_A,num_inference_steps=20,guidance_scale=0,generator=_A,output_type="np",).images[0]
SCREAMING_SNAKE_CASE_ : Any = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy" )
assert np.abs((expected_image - image).max() ) < 5E-2
| 18 | 1 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
__lowerCamelCase : List[str] = re.compile(R'''\b(a|an|the)\b''', re.UNICODE)
__lowerCamelCase : Dict = None
def _snake_case ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0." )
parser.add_argument("data_file" , metavar="data.json" , help="Input data JSON file." )
parser.add_argument("pred_file" , metavar="pred.json" , help="Model predictions." )
parser.add_argument(
"--out-file" , "-o" , metavar="eval.json" , help="Write accuracy metrics to file (default is stdout)." )
parser.add_argument(
"--na-prob-file" , "-n" , metavar="na_prob.json" , help="Model estimates of probability of no answer." )
parser.add_argument(
"--na-prob-thresh" , "-t" , type=lowerCAmelCase , default=1.0 , help="Predict \"\" if no-answer probability exceeds this (default = 1.0)." , )
parser.add_argument(
"--out-image-dir" , "-p" , metavar="out_images" , default=lowerCAmelCase , help="Save precision-recall curves to directory." )
parser.add_argument("--verbose" , "-v" , action="store_true" )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def _snake_case ( lowerCAmelCase : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
SCREAMING_SNAKE_CASE_ : Optional[Any] = bool(qa["answers"]["text"] )
return qid_to_has_ans
def _snake_case ( lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
def remove_articles(lowerCAmelCase : List[Any] ):
return ARTICLES_REGEX.sub(" " , lowerCAmelCase )
def white_space_fix(lowerCAmelCase : Optional[Any] ):
return " ".join(text.split() )
def remove_punc(lowerCAmelCase : List[str] ):
SCREAMING_SNAKE_CASE_ : List[str] = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(lowerCAmelCase : List[Any] ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(lowerCAmelCase ) ) ) )
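# Hedged worked example of the normalization above: for the input
# "The Quick, Brown Fox!" the pipeline lowercases the text, strips the
# punctuation, removes the article "the", and collapses whitespace,
# producing "quick brown fox". Exact-match and F1 below are computed on this
# normalized form, so casing, punctuation, and articles never count against
# a prediction.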
def _snake_case ( lowerCAmelCase : str ):
"""simple docstring"""
if not s:
return []
return normalize_answer(lowerCAmelCase ).split()
def _snake_case ( lowerCAmelCase : List[Any] , lowerCAmelCase : int ):
"""simple docstring"""
return int(normalize_answer(lowerCAmelCase ) == normalize_answer(lowerCAmelCase ) )
def _snake_case ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = get_tokens(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] = get_tokens(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : str = collections.Counter(lowerCAmelCase ) & collections.Counter(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = sum(common.values() )
if len(lowerCAmelCase ) == 0 or len(lowerCAmelCase ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
SCREAMING_SNAKE_CASE_ : str = 1.0 * num_same / len(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[str] = 1.0 * num_same / len(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[str] = (2 * precision * recall) / (precision + recall)
return fa
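# Hedged sketch: a worked example of the token-overlap F1 above. The helper
# name `_example_token_f1` is hypothetical (not part of the original script);
# it re-derives the score from raw token lists so the arithmetic is explicit.
def _example_token_f1():
    gold = ["cat", "sat", "mat"]
    pred = ["cat", "sat", "down"]
    common = collections.Counter(gold) & collections.Counter(pred)  # Counter({'cat': 1, 'sat': 1})
    num_same = sum(common.values())  # 2
    precision = num_same / len(pred)  # 2/3
    recall = num_same / len(gold)  # 2/3
    return (2 * precision * recall) / (precision + recall)  # harmonic mean = 2/3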
def _snake_case ( lowerCAmelCase : int , lowerCAmelCase : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = {}
SCREAMING_SNAKE_CASE_ : Dict = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
SCREAMING_SNAKE_CASE_ : Tuple = qa["id"]
SCREAMING_SNAKE_CASE_ : Dict = [t for t in qa["answers"]["text"] if normalize_answer(lowerCAmelCase )]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
SCREAMING_SNAKE_CASE_ : Any = [""]
if qid not in preds:
print(f'Missing prediction for {qid}' )
continue
SCREAMING_SNAKE_CASE_ : str = preds[qid]
# Take max over all gold answers
SCREAMING_SNAKE_CASE_ : str = max(compute_exact(lowerCAmelCase , lowerCAmelCase ) for a in gold_answers )
SCREAMING_SNAKE_CASE_ : int = max(compute_fa(lowerCAmelCase , lowerCAmelCase ) for a in gold_answers )
return exact_scores, fa_scores
def _snake_case ( lowerCAmelCase : List[str] , lowerCAmelCase : Dict , lowerCAmelCase : List[str] , lowerCAmelCase : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = {}
for qid, s in scores.items():
SCREAMING_SNAKE_CASE_ : Optional[int] = na_probs[qid] > na_prob_thresh
if pred_na:
SCREAMING_SNAKE_CASE_ : Optional[int] = float(not qid_to_has_ans[qid] )
else:
SCREAMING_SNAKE_CASE_ : Optional[int] = s
return new_scores
def _snake_case ( lowerCAmelCase : List[Any] , lowerCAmelCase : Dict , lowerCAmelCase : Optional[Any]=None ):
"""simple docstring"""
if not qid_list:
SCREAMING_SNAKE_CASE_ : Optional[Any] = len(lowerCAmelCase )
return collections.OrderedDict(
[
("exact", 100.0 * sum(exact_scores.values() ) / total),
("f1", 100.0 * sum(fa_scores.values() ) / total),
("total", total),
] )
else:
SCREAMING_SNAKE_CASE_ : Any = len(lowerCAmelCase )
return collections.OrderedDict(
[
("exact", 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
("f1", 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
("total", total),
] )
def _snake_case ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : List[Any] ):
"""simple docstring"""
for k in new_eval:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = new_eval[k]
def _snake_case ( lowerCAmelCase : str , lowerCAmelCase : Any , lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
plt.step(lowerCAmelCase , lowerCAmelCase , color="b" , alpha=0.2 , where="post" )
plt.fill_between(lowerCAmelCase , lowerCAmelCase , step="post" , alpha=0.2 , color="b" )
plt.xlabel("Recall" )
plt.ylabel("Precision" )
plt.xlim([0.0, 1.05] )
plt.ylim([0.0, 1.05] )
plt.title(lowerCAmelCase )
plt.savefig(lowerCAmelCase )
plt.clf()
def _snake_case ( lowerCAmelCase : Optional[Any] , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : List[Any] , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Union[str, Any]=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = sorted(lowerCAmelCase , key=lambda lowerCAmelCase : na_probs[k] )
SCREAMING_SNAKE_CASE_ : List[str] = 0.0
SCREAMING_SNAKE_CASE_ : int = 1.0
SCREAMING_SNAKE_CASE_ : List[Any] = 0.0
SCREAMING_SNAKE_CASE_ : Optional[Any] = [1.0]
SCREAMING_SNAKE_CASE_ : List[str] = [0.0]
SCREAMING_SNAKE_CASE_ : Optional[int] = 0.0
for i, qid in enumerate(lowerCAmelCase ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
SCREAMING_SNAKE_CASE_ : Tuple = true_pos / float(i + 1 )
SCREAMING_SNAKE_CASE_ : List[str] = true_pos / float(lowerCAmelCase )
if i == len(lowerCAmelCase ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(lowerCAmelCase )
recalls.append(lowerCAmelCase )
if out_image:
plot_pr_curve(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return {"ap": 100.0 * avg_prec}
def _snake_case ( lowerCAmelCase : List[Any] , lowerCAmelCase : Any , lowerCAmelCase : List[Any] , lowerCAmelCase : str , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Optional[int] ):
"""simple docstring"""
if out_image_dir and not os.path.exists(lowerCAmelCase ):
os.makedirs(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : int = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
SCREAMING_SNAKE_CASE_ : List[Any] = make_precision_recall_eval(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , out_image=os.path.join(lowerCAmelCase , "pr_exact.png" ) , title="Precision-Recall curve for Exact Match score" , )
SCREAMING_SNAKE_CASE_ : int = make_precision_recall_eval(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , out_image=os.path.join(lowerCAmelCase , "pr_f1.png" ) , title="Precision-Recall curve for F1 score" , )
SCREAMING_SNAKE_CASE_ : Dict = {k: float(lowerCAmelCase ) for k, v in qid_to_has_ans.items()}
SCREAMING_SNAKE_CASE_ : Any = make_precision_recall_eval(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , out_image=os.path.join(lowerCAmelCase , "pr_oracle.png" ) , title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)" , )
merge_eval(lowerCAmelCase , lowerCAmelCase , "pr_exact" )
merge_eval(lowerCAmelCase , lowerCAmelCase , "pr_f1" )
merge_eval(lowerCAmelCase , lowerCAmelCase , "pr_oracle" )
def _snake_case ( lowerCAmelCase : Optional[int] , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[Any] ):
"""simple docstring"""
if not qid_list:
return
SCREAMING_SNAKE_CASE_ : int = [na_probs[k] for k in qid_list]
SCREAMING_SNAKE_CASE_ : List[Any] = np.ones_like(lowerCAmelCase ) / float(len(lowerCAmelCase ) )
plt.hist(lowerCAmelCase , weights=lowerCAmelCase , bins=2_0 , range=(0.0, 1.0) )
plt.xlabel("Model probability of no-answer" )
plt.ylabel("Proportion of dataset" )
plt.title(f'Histogram of no-answer probability: {name}' )
plt.savefig(os.path.join(lowerCAmelCase , f'na_prob_hist_{name}.png' ) )
plt.clf()
def _snake_case ( lowerCAmelCase : Dict , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int , lowerCAmelCase : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
SCREAMING_SNAKE_CASE_ : List[str] = num_no_ans
SCREAMING_SNAKE_CASE_ : List[str] = cur_score
SCREAMING_SNAKE_CASE_ : List[Any] = 0.0
SCREAMING_SNAKE_CASE_ : List[str] = sorted(lowerCAmelCase , key=lambda lowerCAmelCase : na_probs[k] )
for i, qid in enumerate(lowerCAmelCase ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
SCREAMING_SNAKE_CASE_ : Tuple = scores[qid]
else:
if preds[qid]:
SCREAMING_SNAKE_CASE_ : int = -1
else:
SCREAMING_SNAKE_CASE_ : str = 0
cur_score += diff
if cur_score > best_score:
SCREAMING_SNAKE_CASE_ : Optional[int] = cur_score
SCREAMING_SNAKE_CASE_ : str = na_probs[qid]
return 100.0 * best_score / len(lowerCAmelCase ), best_thresh
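# Hedged sketch of the sweep above: questions are visited in order of
# increasing no-answer probability, so each step corresponds to moving the
# answer/no-answer threshold just past one more question. The running score
# starts from "predict no-answer everywhere" and is updated incrementally;
# the best running score and the probability where it occurs are returned.
# The helper name is hypothetical and only restates the update rule.
def _example_threshold_sweep(preds, scores, na_probs, qid_to_has_ans):
    cur = sum(1 for has_ans in qid_to_has_ans.values() if not has_ans)
    best, best_thresh = cur, 0.0
    for qid in sorted(scores, key=lambda q: na_probs[q]):
        if qid_to_has_ans[qid]:
            diff = scores[qid]  # answering gains this question's score
        else:
            diff = -1 if preds[qid] else 0  # a non-empty guess forfeits the no-answer credit
        cur += diff
        if cur > best:
            best, best_thresh = cur, na_probs[qid]
    return 100.0 * best / len(scores), best_thresh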
def _snake_case ( lowerCAmelCase : Any , lowerCAmelCase : str , lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = find_best_thresh(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = find_best_thresh(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[Any] = best_exact
SCREAMING_SNAKE_CASE_ : Dict = exact_thresh
SCREAMING_SNAKE_CASE_ : List[str] = best_fa
SCREAMING_SNAKE_CASE_ : int = fa_thresh
def _snake_case ( ):
"""simple docstring"""
with open(OPTS.data_file ) as f:
SCREAMING_SNAKE_CASE_ : int = json.load(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = dataset_json["data"]
with open(OPTS.pred_file ) as f:
SCREAMING_SNAKE_CASE_ : int = json.load(lowerCAmelCase )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = json.load(lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE_ : str = {k: 0.0 for k in preds}
SCREAMING_SNAKE_CASE_ : str = make_qid_to_has_ans(lowerCAmelCase ) # maps qid to True/False
SCREAMING_SNAKE_CASE_ : int = [k for k, v in qid_to_has_ans.items() if v]
SCREAMING_SNAKE_CASE_ : List[str] = [k for k, v in qid_to_has_ans.items() if not v]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = get_raw_scores(lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Tuple = apply_no_ans_threshold(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , OPTS.na_prob_thresh )
SCREAMING_SNAKE_CASE_ : Any = apply_no_ans_threshold(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , OPTS.na_prob_thresh )
SCREAMING_SNAKE_CASE_ : List[str] = make_eval_dict(lowerCAmelCase , lowerCAmelCase )
if has_ans_qids:
SCREAMING_SNAKE_CASE_ : int = make_eval_dict(lowerCAmelCase , lowerCAmelCase , qid_list=lowerCAmelCase )
merge_eval(lowerCAmelCase , lowerCAmelCase , "HasAns" )
if no_ans_qids:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = make_eval_dict(lowerCAmelCase , lowerCAmelCase , qid_list=lowerCAmelCase )
merge_eval(lowerCAmelCase , lowerCAmelCase , "NoAns" )
if OPTS.na_prob_file:
find_all_best_thresh(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , OPTS.out_image_dir )
histogram_na_prob(lowerCAmelCase , lowerCAmelCase , OPTS.out_image_dir , "hasAns" )
histogram_na_prob(lowerCAmelCase , lowerCAmelCase , OPTS.out_image_dir , "noAns" )
if OPTS.out_file:
with open(OPTS.out_file , "w" ) as f:
json.dump(lowerCAmelCase , lowerCAmelCase )
else:
print(json.dumps(lowerCAmelCase , indent=2 ) )
if __name__ == "__main__":
__lowerCamelCase : List[Any] = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('''Agg''')
import matplotlib.pyplot as plt
main()
| 18 | from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
__lowerCamelCase : Optional[int] = logging.get_logger(__name__)
__lowerCamelCase : Tuple = {
'''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class a__ ( A__ ):
A = 'perceiver'
def __init__( self : List[Any],_A : Tuple=256,_A : str=1280,_A : List[Any]=768,_A : Union[str, Any]=1,_A : Union[str, Any]=26,_A : List[str]=8,_A : List[Any]=8,_A : List[Any]=None,_A : List[Any]=None,_A : Union[str, Any]="kv",_A : Any=1,_A : int=1,_A : Dict="gelu",_A : Any=0.1,_A : int=0.02,_A : int=1E-12,_A : Any=True,_A : Optional[Any]=262,_A : List[Any]=2048,_A : str=56,_A : Optional[int]=[368, 496],_A : Dict=16,_A : Tuple=1920,_A : List[Any]=16,_A : str=[1, 16, 224, 224],**_A : Optional[Any],):
"""simple docstring"""
super().__init__(**_A )
SCREAMING_SNAKE_CASE_ : Dict = num_latents
SCREAMING_SNAKE_CASE_ : List[Any] = d_latents
SCREAMING_SNAKE_CASE_ : Union[str, Any] = d_model
SCREAMING_SNAKE_CASE_ : Optional[int] = num_blocks
SCREAMING_SNAKE_CASE_ : List[Any] = num_self_attends_per_block
SCREAMING_SNAKE_CASE_ : Tuple = num_self_attention_heads
SCREAMING_SNAKE_CASE_ : List[str] = num_cross_attention_heads
SCREAMING_SNAKE_CASE_ : List[Any] = qk_channels
SCREAMING_SNAKE_CASE_ : Any = v_channels
SCREAMING_SNAKE_CASE_ : Any = cross_attention_shape_for_attention
SCREAMING_SNAKE_CASE_ : List[str] = self_attention_widening_factor
SCREAMING_SNAKE_CASE_ : Any = cross_attention_widening_factor
SCREAMING_SNAKE_CASE_ : List[Any] = hidden_act
SCREAMING_SNAKE_CASE_ : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Any = initializer_range
SCREAMING_SNAKE_CASE_ : List[Any] = layer_norm_eps
SCREAMING_SNAKE_CASE_ : Tuple = use_query_residual
# masked language modeling attributes
SCREAMING_SNAKE_CASE_ : List[str] = vocab_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = max_position_embeddings
# image classification attributes
SCREAMING_SNAKE_CASE_ : Dict = image_size
# flow attributes
SCREAMING_SNAKE_CASE_ : List[Any] = train_size
# multimodal autoencoding attributes
SCREAMING_SNAKE_CASE_ : str = num_frames
SCREAMING_SNAKE_CASE_ : Any = audio_samples_per_frame
SCREAMING_SNAKE_CASE_ : Tuple = samples_per_patch
SCREAMING_SNAKE_CASE_ : Optional[Any] = output_shape
class a__ ( A__ ):
@property
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE_ : List[str] = {0: "batch", 1: "choice", 2: "sequence"}
else:
SCREAMING_SNAKE_CASE_ : str = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("inputs", dynamic_axis),
("attention_mask", dynamic_axis),
] )
@property
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
return 1E-4
def __UpperCamelCase ( self : List[str],_A : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],_A : int = -1,_A : int = -1,_A : int = -1,_A : bool = False,_A : Optional[TensorType] = None,_A : int = 3,_A : int = 40,_A : int = 40,):
"""simple docstring"""
if isinstance(_A,_A ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE_ : Tuple = compute_effective_axis_dimension(
_A,fixed_dimension=OnnxConfig.default_fixed_batch,num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE_ : Tuple = preprocessor.num_special_tokens_to_add(_A )
SCREAMING_SNAKE_CASE_ : Any = compute_effective_axis_dimension(
_A,fixed_dimension=OnnxConfig.default_fixed_sequence,num_token_to_add=_A )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE_ : Optional[Any] = [" ".join(["a"] ) * seq_length] * batch_size
SCREAMING_SNAKE_CASE_ : str = dict(preprocessor(_A,return_tensors=_A ) )
SCREAMING_SNAKE_CASE_ : List[str] = inputs.pop("input_ids" )
return inputs
elif isinstance(_A,_A ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE_ : Any = compute_effective_axis_dimension(_A,fixed_dimension=OnnxConfig.default_fixed_batch )
SCREAMING_SNAKE_CASE_ : Optional[int] = self._generate_dummy_images(_A,_A,_A,_A )
SCREAMING_SNAKE_CASE_ : Any = dict(preprocessor(images=_A,return_tensors=_A ) )
SCREAMING_SNAKE_CASE_ : Any = inputs.pop("pixel_values" )
return inputs
else:
raise ValueError(
"Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor." )
| 18 | 1 |
# limitations under the License.
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class a__ ( A__ ):
def __init__( self : Dict,_A : List[Any],_A : Any ):
"""simple docstring"""
super().__init__()
self.register_modules(unet=_A,scheduler=_A )
@torch.no_grad()
def __call__( self : Dict,_A : int = 1,_A : Optional[torch.Generator] = None,_A : int = 50,_A : Optional[str] = "pil",_A : bool = True,**_A : str,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = torch.randn(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),generator=_A,)
SCREAMING_SNAKE_CASE_ : int = image.to(self.device )
# set step values
self.scheduler.set_timesteps(_A )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
SCREAMING_SNAKE_CASE_ : Tuple = self.unet(_A,_A ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
SCREAMING_SNAKE_CASE_ : Tuple = self.scheduler.step(_A,_A,_A ).prev_sample
SCREAMING_SNAKE_CASE_ : str = (image / 2 + 0.5).clamp(0,1 )
SCREAMING_SNAKE_CASE_ : Tuple = image.cpu().permute(0,2,3,1 ).numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE_ : Dict = self.numpy_to_pil(_A )
if not return_dict:
return (image,), "This is a local test"
return ImagePipelineOutput(images=_A ), "This is a local test"
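# Hedged usage sketch for the pipeline above (the model and scheduler choices
# are placeholders, not taken from the source; any UNet2DModel / scheduler
# pair with compatible sample sizes is assumed to work):
#
#   pipe = <pipeline class>(unet=UNet2DModel(...), scheduler=DDIMScheduler())
#   images = pipe(batch_size=1, num_inference_steps=50, output_type="pil").images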
| 18 | from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class a__ ( yaml.SafeLoader ):
def __UpperCamelCase ( self : str,_A : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = [self.constructed_objects[key_node] for key_node, _ in node.value]
SCREAMING_SNAKE_CASE_ : List[str] = [tuple(_A ) if isinstance(_A,_A ) else key for key in keys]
SCREAMING_SNAKE_CASE_ : Optional[int] = Counter(_A )
SCREAMING_SNAKE_CASE_ : Tuple = [key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(F'Got duplicate yaml keys: {duplicate_keys}' )
def __UpperCamelCase ( self : Tuple,_A : Dict,_A : List[Any]=False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = super().construct_mapping(_A,deep=_A )
self._check_no_duplicates_on_constructed_node(_A )
return mapping
def _snake_case ( lowerCAmelCase : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = list(readme_content.splitlines() )
if full_content and full_content[0] == "---" and "---" in full_content[1:]:
SCREAMING_SNAKE_CASE_ : List[Any] = full_content[1:].index("---" ) + 1
SCREAMING_SNAKE_CASE_ : int = "\n".join(full_content[1:sep_idx] )
return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
return None, "\n".join(lowerCAmelCase )
class a__ ( A__ ):
# class attributes
A = {'train_eval_index'} # train-eval-index in the YAML metadata
@classmethod
def __UpperCamelCase ( cls : Any,_A : Path ):
"""simple docstring"""
with open(_A,encoding="utf-8" ) as readme_file:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = _split_yaml_from_readme(readme_file.read() )
if yaml_string is not None:
return cls.from_yaml_string(_A )
else:
return cls()
def __UpperCamelCase ( self : Dict,_A : Path ):
"""simple docstring"""
if path.exists():
with open(_A,encoding="utf-8" ) as readme_file:
SCREAMING_SNAKE_CASE_ : int = readme_file.read()
else:
SCREAMING_SNAKE_CASE_ : Any = None
SCREAMING_SNAKE_CASE_ : int = self._to_readme(_A )
with open(_A,"w",encoding="utf-8" ) as readme_file:
readme_file.write(_A )
def __UpperCamelCase ( self : Optional[int],_A : Optional[str] = None ):
"""simple docstring"""
if readme_content is not None:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = _split_yaml_from_readme(_A )
SCREAMING_SNAKE_CASE_ : Tuple = "---\n" + self.to_yaml_string() + "---\n" + content
else:
SCREAMING_SNAKE_CASE_ : Dict = "---\n" + self.to_yaml_string() + "---\n"
return full_content
@classmethod
def __UpperCamelCase ( cls : Dict,_A : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = yaml.load(_A,Loader=_NoDuplicateSafeLoader ) or {}
# Convert the YAML keys to DatasetMetadata fields
SCREAMING_SNAKE_CASE_ : Any = {
(key.replace("-","_" ) if key.replace("-","_" ) in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
return cls(**_A )
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
return yaml.safe_dump(
{
(key.replace("_","-" ) if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
},sort_keys=_A,allow_unicode=_A,encoding="utf-8",).decode("utf-8" )
__lowerCamelCase : List[Any] = {
'''image-classification''': [],
'''translation''': [],
'''image-segmentation''': [],
'''fill-mask''': [],
'''automatic-speech-recognition''': [],
'''token-classification''': [],
'''sentence-similarity''': [],
'''audio-classification''': [],
'''question-answering''': [],
'''summarization''': [],
'''zero-shot-classification''': [],
'''table-to-text''': [],
'''feature-extraction''': [],
'''other''': [],
'''multiple-choice''': [],
'''text-classification''': [],
'''text-to-image''': [],
'''text2text-generation''': [],
'''zero-shot-image-classification''': [],
'''tabular-classification''': [],
'''tabular-regression''': [],
'''image-to-image''': [],
'''tabular-to-text''': [],
'''unconditional-image-generation''': [],
'''text-retrieval''': [],
'''text-to-speech''': [],
'''object-detection''': [],
'''audio-to-audio''': [],
'''text-generation''': [],
'''conversational''': [],
'''table-question-answering''': [],
'''visual-question-answering''': [],
'''image-to-text''': [],
'''reinforcement-learning''': [],
'''voice-activity-detection''': [],
'''time-series-forecasting''': [],
'''document-question-answering''': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
__lowerCamelCase : List[Any] = ArgumentParser(usage='''Validate the yaml metadata block of a README.md file.''')
ap.add_argument('''readme_filepath''')
__lowerCamelCase : Dict = ap.parse_args()
__lowerCamelCase : List[Any] = Path(args.readme_filepath)
__lowerCamelCase : Optional[int] = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 18 | 1 |
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
__lowerCamelCase : Optional[int] = {
'''distilbert''': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'''roberta''': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'''bert''': (BertConfig, BertForMaskedLM, BertTokenizer),
'''gpt2''': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def _snake_case ( lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def _snake_case ( lowerCAmelCase : str , lowerCAmelCase : Optional[int] ):
"""simple docstring"""
if args.student_type == "roberta":
SCREAMING_SNAKE_CASE_ : Tuple = False
elif args.student_type == "gpt2":
SCREAMING_SNAKE_CASE_ : int = False
def _snake_case ( lowerCAmelCase : str , lowerCAmelCase : int ):
"""simple docstring"""
if args.student_type == "roberta":
SCREAMING_SNAKE_CASE_ : str = False
def _snake_case ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = argparse.ArgumentParser(description="Training" )
parser.add_argument("--force" , action="store_true" , help="Overwrite dump_path if it already exists." )
parser.add_argument(
"--dump_path" , type=lowerCAmelCase , required=lowerCAmelCase , help="The output directory (log, checkpoints, parameters, etc.)" )
parser.add_argument(
"--data_file" , type=lowerCAmelCase , required=lowerCAmelCase , help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence." , )
parser.add_argument(
"--student_type" , type=lowerCAmelCase , choices=["distilbert", "roberta", "gpt2"] , required=lowerCAmelCase , help="The student type (DistilBERT, RoBERTa)." , )
parser.add_argument("--student_config" , type=lowerCAmelCase , required=lowerCAmelCase , help="Path to the student configuration." )
parser.add_argument(
"--student_pretrained_weights" , default=lowerCAmelCase , type=lowerCAmelCase , help="Load student initialization checkpoint." )
parser.add_argument(
"--teacher_type" , choices=["bert", "roberta", "gpt2"] , required=lowerCAmelCase , help="Teacher type (BERT, RoBERTa)." )
parser.add_argument("--teacher_name" , type=lowerCAmelCase , required=lowerCAmelCase , help="The teacher model." )
parser.add_argument("--temperature" , default=2.0 , type=lowerCAmelCase , help="Temperature for the softmax temperature." )
parser.add_argument(
"--alpha_ce" , default=0.5 , type=lowerCAmelCase , help="Linear weight for the distillation loss. Must be >=0." )
parser.add_argument(
"--alpha_mlm" , default=0.0 , type=lowerCAmelCase , help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag." , )
parser.add_argument("--alpha_clm" , default=0.5 , type=lowerCAmelCase , help="Linear weight for the CLM loss. Must be >=0." )
parser.add_argument("--alpha_mse" , default=0.0 , type=lowerCAmelCase , help="Linear weight of the MSE loss. Must be >=0." )
parser.add_argument(
"--alpha_cos" , default=0.0 , type=lowerCAmelCase , help="Linear weight of the cosine embedding loss. Must be >=0." )
parser.add_argument(
"--mlm" , action="store_true" , help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM." )
parser.add_argument(
"--mlm_mask_prop" , default=0.15 , type=lowerCAmelCase , help="Proportion of tokens for which we need to make a prediction." , )
parser.add_argument("--word_mask" , default=0.8 , type=lowerCAmelCase , help="Proportion of tokens to mask out." )
parser.add_argument("--word_keep" , default=0.1 , type=lowerCAmelCase , help="Proportion of tokens to keep." )
parser.add_argument("--word_rand" , default=0.1 , type=lowerCAmelCase , help="Proportion of tokens to randomly replace." )
parser.add_argument(
"--mlm_smoothing" , default=0.7 , type=lowerCAmelCase , help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec)." , )
parser.add_argument("--token_counts" , type=lowerCAmelCase , help="The token counts in the data_file for MLM." )
parser.add_argument(
"--restrict_ce_to_mask" , action="store_true" , help="If true, compute the distillation loss only the [MLM] prediction distribution." , )
parser.add_argument(
"--freeze_pos_embs" , action="store_true" , help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only." , )
parser.add_argument(
"--freeze_token_type_embds" , action="store_true" , help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only." , )
parser.add_argument("--n_epoch" , type=lowerCAmelCase , default=3 , help="Number of pass on the whole dataset." )
parser.add_argument("--batch_size" , type=lowerCAmelCase , default=5 , help="Batch size (for each process)." )
parser.add_argument(
"--group_by_size" , action="store_false" , help="If true, group sequences that have similar length into the same batch. Default is true." , )
parser.add_argument(
"--gradient_accumulation_steps" , type=lowerCAmelCase , default=5_0 , help="Gradient accumulation for larger training batches." , )
parser.add_argument("--warmup_prop" , default=0.05 , type=lowerCAmelCase , help="Linear warmup proportion." )
parser.add_argument("--weight_decay" , default=0.0 , type=lowerCAmelCase , help="Weight decay if we apply some." )
parser.add_argument("--learning_rate" , default=5E-4 , type=lowerCAmelCase , help="The initial learning rate for Adam." )
parser.add_argument("--adam_epsilon" , default=1E-6 , type=lowerCAmelCase , help="Epsilon for Adam optimizer." )
parser.add_argument("--max_grad_norm" , default=5.0 , type=lowerCAmelCase , help="Max gradient norm." )
parser.add_argument("--initializer_range" , default=0.02 , type=lowerCAmelCase , help="Random initialization range." )
parser.add_argument(
"--fp16" , action="store_true" , help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit" , )
parser.add_argument(
"--fp16_opt_level" , type=lowerCAmelCase , default="O1" , help=(
"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html"
) , )
parser.add_argument("--n_gpu" , type=lowerCAmelCase , default=1 , help="Number of GPUs in the node." )
parser.add_argument("--local_rank" , type=lowerCAmelCase , default=-1 , help="Distributed training - Local rank" )
parser.add_argument("--seed" , type=lowerCAmelCase , default=5_6 , help="Random seed" )
parser.add_argument("--log_interval" , type=lowerCAmelCase , default=5_0_0 , help="Tensorboard logging interval." )
parser.add_argument("--checkpoint_interval" , type=lowerCAmelCase , default=4_0_0_0 , help="Checkpoint interval." )
SCREAMING_SNAKE_CASE_ : Tuple = parser.parse_args()
sanity_checks(lowerCAmelCase )
# ARGS #
init_gpu_params(lowerCAmelCase )
set_seed(lowerCAmelCase )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
                    f'Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite'
                    " it. Use `--force` if you want to overwrite it." )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(f'Experiment will be dumped and logged in {args.dump_path}' )
# SAVE PARAMS #
logger.info(f'Param: {args}' )
with open(os.path.join(args.dump_path , "parameters.json" ) , "w" ) as f:
json.dump(vars(lowerCAmelCase ) , lowerCAmelCase , indent=4 )
git_log(args.dump_path )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = MODEL_CLASSES[args.student_type]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
SCREAMING_SNAKE_CASE_ : Dict = teacher_tokenizer_class.from_pretrained(args.teacher_name )
SCREAMING_SNAKE_CASE_ : Optional[Any] = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
SCREAMING_SNAKE_CASE_ : List[str] = tokenizer.all_special_tokens.index(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Tuple = tokenizer.all_special_ids[idx]
logger.info(f'Special tokens {special_tok_ids}' )
SCREAMING_SNAKE_CASE_ : Tuple = special_tok_ids
SCREAMING_SNAKE_CASE_ : Tuple = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(f'Loading data from {args.data_file}' )
with open(args.data_file , "rb" ) as fp:
SCREAMING_SNAKE_CASE_ : Optional[int] = pickle.load(lowerCAmelCase )
if args.mlm:
logger.info(f'Loading token counts from {args.token_counts} (already pre-computed)' )
with open(args.token_counts , "rb" ) as fp:
SCREAMING_SNAKE_CASE_ : str = pickle.load(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : int = np.maximum(lowerCAmelCase , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
SCREAMING_SNAKE_CASE_ : str = 0.0 # do not predict special tokens
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.from_numpy(lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE_ : Tuple = None
SCREAMING_SNAKE_CASE_ : List[str] = LmSeqsDataset(params=lowerCAmelCase , data=lowerCAmelCase )
logger.info("Data loader created." )
# STUDENT #
logger.info(f'Loading student config from {args.student_config}' )
SCREAMING_SNAKE_CASE_ : Optional[Any] = student_config_class.from_pretrained(args.student_config )
SCREAMING_SNAKE_CASE_ : List[Any] = True
if args.student_pretrained_weights is not None:
logger.info(f'Loading pretrained weights from {args.student_pretrained_weights}' )
SCREAMING_SNAKE_CASE_ : Optional[int] = student_model_class.from_pretrained(args.student_pretrained_weights , config=lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE_ : List[str] = student_model_class(lowerCAmelCase )
if args.n_gpu > 0:
student.to(f'cuda:{args.local_rank}' )
logger.info("Student loaded." )
# TEACHER #
SCREAMING_SNAKE_CASE_ : List[Any] = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=lowerCAmelCase )
if args.n_gpu > 0:
teacher.to(f'cuda:{args.local_rank}' )
logger.info(f'Teacher loaded from {args.teacher_name}.' )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(lowerCAmelCase , lowerCAmelCase )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(lowerCAmelCase , lowerCAmelCase )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
SCREAMING_SNAKE_CASE_ : int = Distiller(
params=lowerCAmelCase , dataset=lowerCAmelCase , token_probs=lowerCAmelCase , student=lowerCAmelCase , teacher=lowerCAmelCase )
distiller.train()
logger.info("Let's go get some drinks." )
if __name__ == "__main__":
main()
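# Hedged usage sketch (all paths and model names are placeholders, not taken
# from the source). An MLM distillation run consistent with the sanity checks
# above would look like:
#
#   python train.py --student_type distilbert --student_config student.json \
#       --teacher_type bert --teacher_name bert-base-uncased \
#       --mlm --alpha_ce 0.5 --alpha_mlm 0.5 --alpha_clm 0.0 \
#       --token_counts counts.pkl --data_file data.pkl --dump_path ./dump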
| 18 | from __future__ import annotations
from math import pi, sqrt
def _snake_case ( lowerCAmelCase : float , lowerCAmelCase : float ):
"""simple docstring"""
if inductance <= 0:
raise ValueError("Inductance cannot be 0 or negative" )
elif capacitance <= 0:
raise ValueError("Capacitance cannot be 0 or negative" )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
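# Hedged sketch: the value above follows f = 1 / (2 * pi * sqrt(L * C)) for an
# ideal LC circuit. For example, L = 10 mH and C = 100 nF give
# f = 1 / (2 * pi * sqrt(10e-3 * 100e-9)) ≈ 5033 Hz. The helper name below is
# hypothetical and only restates the formula without the tuple wrapper.
def _example_resonant_frequency(inductance: float = 10e-3, capacitance: float = 100e-9) -> float:
    return 1 / (2 * pi * sqrt(inductance * capacitance))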
if __name__ == "__main__":
import doctest
doctest.testmod()
| 18 | 1 |
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
__lowerCamelCase : List[str] = HfApi()
__lowerCamelCase : Union[str, Any] = {}
# fmt: off
__lowerCamelCase : Any = torch.tensor([
-0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467,
1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189,
-1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839,
0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557
])
__lowerCamelCase : Dict = torch.tensor([
-2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436,
1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208,
-2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948,
2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365
])
__lowerCamelCase : int = torch.tensor([
-0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869,
-0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304,
-0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925,
0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943
])
__lowerCamelCase : Optional[int] = torch.tensor([
0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172,
-0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309,
0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805,
-0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505
])
__lowerCamelCase : List[Any] = torch.tensor([
0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133,
-0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395,
0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559,
-0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386
])
__lowerCamelCase : Tuple = torch.tensor([
0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078,
-0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330,
0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683,
-0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431
])
__lowerCamelCase : Any = torch.tensor([
0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042,
-0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398,
0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574,
-0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390
])
__lowerCamelCase : Any = torch.tensor([
0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042,
-0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290,
0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746,
-0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473
])
__lowerCamelCase : Optional[Any] = torch.tensor([
-1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330,
1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243,
-2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810,
1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251])
__lowerCamelCase : Tuple = torch.tensor([
-1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324,
0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181,
-2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259,
1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266
])
__lowerCamelCase : List[str] = torch.tensor([
-1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212,
0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027,
-2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131,
1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355
])
__lowerCamelCase : List[str] = torch.tensor([
-2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959,
1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351,
-3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341,
3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066
])
__lowerCamelCase : Any = torch.tensor([
-2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740,
1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398,
-2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395,
2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243
])
__lowerCamelCase : Dict = torch.tensor([
-2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336,
1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908,
-3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560,
3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343
])
__lowerCamelCase : List[str] = torch.tensor([
-1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344,
1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391,
-2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439,
1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219
])
# fmt: on
__lowerCamelCase : str = api.list_models(filter='''diffusers''')
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
__lowerCamelCase : Optional[int] = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1]
print(f'''Started running {mod.modelId}!!!''')
if mod.modelId.startswith('''CompVis'''):
__lowerCamelCase : List[str] = UNetaDModel.from_pretrained(local_checkpoint, subfolder='''unet''')
else:
__lowerCamelCase : Any = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
__lowerCamelCase : Union[str, Any] = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
__lowerCamelCase : str = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
__lowerCamelCase : List[Any] = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1E-3
)
print(f'''{mod.modelId} has passed successfully!!!''')
| 18 | def _snake_case ( lowerCAmelCase : list ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = len(lowerCAmelCase )
for i in range(1 , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : int = collection[i]
SCREAMING_SNAKE_CASE_ : Any = 0
SCREAMING_SNAKE_CASE_ : Tuple = i - 1
while low <= high:
SCREAMING_SNAKE_CASE_ : int = (low + high) // 2
if val < collection[mid]:
SCREAMING_SNAKE_CASE_ : Optional[Any] = mid - 1
else:
SCREAMING_SNAKE_CASE_ : Tuple = mid + 1
for j in range(lowerCAmelCase , lowerCAmelCase , -1 ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = collection[j - 1]
SCREAMING_SNAKE_CASE_ : int = val
return collection
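# Hedged sketch: a readable restatement of the binary insertion sort above,
# with descriptive names assumed for the assignment targets. A binary search
# locates the insertion point, then the tail is shifted one slot to the right.
def _example_binary_insertion_sort(collection: list) -> list:
    for i in range(1, len(collection)):
        val = collection[i]
        low, high = 0, i - 1
        while low <= high:  # binary search for the insertion index
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):  # shift elements right to open the slot
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection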
if __name__ == "__main__":
__lowerCamelCase : Dict = input('''Enter numbers separated by a comma:\n''').strip()
__lowerCamelCase : List[str] = [int(item) for item in user_input.split(''',''')]
print(binary_insertion_sort(unsorted))
| 18 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__lowerCamelCase : Optional[int] = logging.get_logger(__name__)
def _snake_case ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = b.T
SCREAMING_SNAKE_CASE_ : List[Any] = np.sum(np.square(lowerCAmelCase ) , axis=1 )
SCREAMING_SNAKE_CASE_ : Optional[int] = np.sum(np.square(lowerCAmelCase ) , axis=0 )
SCREAMING_SNAKE_CASE_ : Optional[int] = np.matmul(lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[str] = aa[:, None] - 2 * ab + ba[None, :]
return d
def _snake_case ( lowerCAmelCase : Tuple , lowerCAmelCase : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = x.reshape(-1 , 3 )
SCREAMING_SNAKE_CASE_ : Tuple = squared_euclidean_distance(lowerCAmelCase , lowerCAmelCase )
return np.argmin(lowerCAmelCase , axis=1 )
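# Hedged sketch: the computation above uses the identity
# ||a - b||^2 = ||a||^2 - 2 * a.b + ||b||^2 to obtain all pairwise squared
# distances with a single matrix multiply instead of a Python double loop.
# The helper below is hypothetical and only checks the identity numerically.
def _example_pairwise_squared_distance(a: np.ndarray, b: np.ndarray) -> np.ndarray:
    naive = ((a[:, None, :] - b[None, :, :]) ** 2).sum(-1)  # (n, m) via broadcasting
    fast = (a**2).sum(1)[:, None] - 2 * (a @ b.T) + (b**2).sum(1)[None, :]
    assert np.allclose(naive, fast)
    return fast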
class a__ ( A__ ):
A = ['pixel_values']
def __init__( self : Any,_A : Optional[Union[List[List[int]], np.ndarray]] = None,_A : bool = True,_A : Dict[str, int] = None,_A : PILImageResampling = PILImageResampling.BILINEAR,_A : bool = True,_A : bool = True,**_A : int,):
"""simple docstring"""
super().__init__(**_A )
SCREAMING_SNAKE_CASE_ : Any = size if size is not None else {"height": 256, "width": 256}
SCREAMING_SNAKE_CASE_ : Union[str, Any] = get_size_dict(_A )
SCREAMING_SNAKE_CASE_ : str = np.array(_A ) if clusters is not None else None
SCREAMING_SNAKE_CASE_ : int = do_resize
SCREAMING_SNAKE_CASE_ : List[str] = size
SCREAMING_SNAKE_CASE_ : Dict = resample
SCREAMING_SNAKE_CASE_ : List[str] = do_normalize
SCREAMING_SNAKE_CASE_ : Union[str, Any] = do_color_quantize
def __UpperCamelCase ( self : Any,_A : np.ndarray,_A : Dict[str, int],_A : PILImageResampling = PILImageResampling.BILINEAR,_A : Optional[Union[str, ChannelDimension]] = None,**_A : Union[str, Any],):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = get_size_dict(_A )
if "height" not in size or "width" not in size:
raise ValueError(F'Size dictionary must contain both height and width keys. Got {size.keys()}' )
return resize(
_A,size=(size["height"], size["width"]),resample=_A,data_format=_A,**_A )
def __UpperCamelCase ( self : str,_A : np.ndarray,_A : Optional[Union[str, ChannelDimension]] = None,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = rescale(image=_A,scale=1 / 127.5,data_format=_A )
SCREAMING_SNAKE_CASE_ : Dict = image - 1
return image
def __UpperCamelCase ( self : Optional[Any],_A : ImageInput,_A : bool = None,_A : Dict[str, int] = None,_A : PILImageResampling = None,_A : bool = None,_A : Optional[bool] = None,_A : Optional[Union[List[List[int]], np.ndarray]] = None,_A : Optional[Union[str, TensorType]] = None,_A : Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST,**_A : List[str],):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE_ : str = size if size is not None else self.size
SCREAMING_SNAKE_CASE_ : Dict = get_size_dict(_A )
SCREAMING_SNAKE_CASE_ : Tuple = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE_ : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE_ : int = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
SCREAMING_SNAKE_CASE_ : int = clusters if clusters is not None else self.clusters
SCREAMING_SNAKE_CASE_ : List[Any] = np.array(_A )
SCREAMING_SNAKE_CASE_ : Any = make_list_of_images(_A )
if not valid_images(_A ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_color_quantize and clusters is None:
raise ValueError("Clusters must be specified if do_color_quantize is True." )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE_ : int = [to_numpy_array(_A ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE_ : List[Any] = [self.resize(image=_A,size=_A,resample=_A ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE_ : int = [self.normalize(image=_A ) for image in images]
if do_color_quantize:
SCREAMING_SNAKE_CASE_ : List[Any] = [to_channel_dimension_format(_A,ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
SCREAMING_SNAKE_CASE_ : Dict = np.array(_A )
SCREAMING_SNAKE_CASE_ : List[Any] = color_quantize(_A,_A ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
SCREAMING_SNAKE_CASE_ : Optional[int] = images.shape[0]
SCREAMING_SNAKE_CASE_ : List[Any] = images.reshape(_A,-1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
SCREAMING_SNAKE_CASE_ : str = list(_A )
else:
SCREAMING_SNAKE_CASE_ : Optional[Any] = [to_channel_dimension_format(_A,_A ) for image in images]
SCREAMING_SNAKE_CASE_ : List[Any] = {"input_ids": images}
return BatchFeature(data=_A,tensor_type=_A )
| 18 | from collections.abc import Sequence
from queue import Queue
class a__ :
def __init__( self : int,_A : List[Any],_A : Optional[Any],_A : Optional[int],_A : int=None,_A : List[str]=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = start
SCREAMING_SNAKE_CASE_ : List[str] = end
SCREAMING_SNAKE_CASE_ : Tuple = val
SCREAMING_SNAKE_CASE_ : List[str] = (start + end) // 2
SCREAMING_SNAKE_CASE_ : Optional[int] = left
SCREAMING_SNAKE_CASE_ : str = right
def __repr__( self : Tuple ):
"""simple docstring"""
return F'SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'
class a__ :
def __init__( self : Any,_A : Sequence,_A : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = collection
SCREAMING_SNAKE_CASE_ : Optional[int] = function
if self.collection:
SCREAMING_SNAKE_CASE_ : List[str] = self._build_tree(0,len(_A ) - 1 )
def __UpperCamelCase ( self : int,_A : Any,_A : List[Any] ):
"""simple docstring"""
self._update_tree(self.root,_A,_A )
def __UpperCamelCase ( self : str,_A : Any,_A : List[Any] ):
"""simple docstring"""
return self._query_range(self.root,_A,_A )
def __UpperCamelCase ( self : Any,_A : Optional[int],_A : int ):
"""simple docstring"""
if start == end:
return SegmentTreeNode(_A,_A,self.collection[start] )
SCREAMING_SNAKE_CASE_ : List[Any] = (start + end) // 2
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self._build_tree(_A,_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = self._build_tree(mid + 1,_A )
return SegmentTreeNode(_A,_A,self.fn(left.val,right.val ),_A,_A )
def __UpperCamelCase ( self : int,_A : int,_A : Tuple,_A : Dict ):
"""simple docstring"""
if node.start == i and node.end == i:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = val
return
if i <= node.mid:
self._update_tree(node.left,_A,_A )
else:
self._update_tree(node.right,_A,_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.fn(node.left.val,node.right.val )
def __UpperCamelCase ( self : str,_A : List[str],_A : Optional[int],_A : Optional[Any] ):
"""simple docstring"""
if node.start == i and node.end == j:
return node.val
if i <= node.mid:
if j <= node.mid:
# range in left child tree
return self._query_range(node.left,_A,_A )
else:
# range in left child tree and right child tree
return self.fn(
self._query_range(node.left,_A,node.mid ),self._query_range(node.right,node.mid + 1,_A ),)
else:
# range in right child tree
return self._query_range(node.right,_A,_A )
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
if self.root is not None:
SCREAMING_SNAKE_CASE_ : int = Queue()
queue.put(self.root )
while not queue.empty():
SCREAMING_SNAKE_CASE_ : Tuple = queue.get()
yield node
if node.left is not None:
queue.put(node.left )
if node.right is not None:
queue.put(node.right )
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('''*''' * 50)
__lowerCamelCase : int = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
| 18 | 1 |
from __future__ import annotations
def _snake_case ( lowerCAmelCase : str , lowerCAmelCase : Optional[int] , lowerCAmelCase : Tuple , lowerCAmelCase : Tuple ): # noqa: E741
"""simple docstring"""
while r - l > 1:
SCREAMING_SNAKE_CASE_ : str = (l + r) // 2
if v[m] >= key:
SCREAMING_SNAKE_CASE_ : Optional[Any] = m
else:
SCREAMING_SNAKE_CASE_ : Tuple = m # noqa: E741
return r
def _snake_case ( lowerCAmelCase : list[int] ):
"""simple docstring"""
if len(lowerCAmelCase ) == 0:
return 0
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [0] * len(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : str = 1
SCREAMING_SNAKE_CASE_ : Union[str, Any] = v[0]
for i in range(1 , len(lowerCAmelCase ) ):
if v[i] < tail[0]:
SCREAMING_SNAKE_CASE_ : int = v[i]
elif v[i] > tail[length - 1]:
SCREAMING_SNAKE_CASE_ : str = v[i]
length += 1
else:
SCREAMING_SNAKE_CASE_ : Tuple = v[i]
return length
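# Hedged sketch: a readable restatement of the O(n log n) algorithm above,
# using the standard library bisect in place of the custom ceil-index search.
# tails[k] holds the smallest possible tail value of a strictly increasing
# subsequence of length k + 1; each element either extends the longest
# subsequence or tightens an existing tail. The helper name is hypothetical.
def _example_longest_increasing_subsequence(v: list[int]) -> int:
    import bisect

    if not v:
        return 0
    tails = [v[0]]
    for x in v[1:]:
        if x > tails[-1]:
            tails.append(x)  # extends the current longest subsequence
        else:
            tails[bisect.bisect_left(tails, x)] = x  # tighten an existing tail
    return len(tails)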
if __name__ == "__main__":
import doctest
doctest.testmod()
| 18 | def _snake_case ( lowerCAmelCase : int , lowerCAmelCase : int ):
"""simple docstring"""
while b:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = b, a % b
return a
def _snake_case ( lowerCAmelCase : int , lowerCAmelCase : int ):
"""simple docstring"""
return a if b == 0 else euclidean_gcd_recursive(lowerCAmelCase , a % b )
def _snake_case ( ):
"""simple docstring"""
print(f'euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}' )
print(f'euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}' )
print(f'euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}' )
print(f'euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}' )
print(f'euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}' )
print(f'euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}' )
print(f'euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}' )
print(f'euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}' )
print(f'euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}' )
print(f'euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}' )
if __name__ == "__main__":
main()
| 18 | 1 |
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class a__ :
def __init__( self : Optional[int],_A : Dict,_A : List[str]=13,_A : List[str]=7,_A : int=True,_A : str=True,_A : Union[str, Any]=True,_A : Tuple=True,_A : Dict=99,_A : Tuple=32,_A : Tuple=2,_A : Tuple=4,_A : Optional[Any]=37,_A : str="gelu",_A : Dict=0.1,_A : List[Any]=0.1,_A : List[str]=512,_A : str=16,_A : int=2,_A : Dict=0.02,_A : List[Any]=3,_A : Optional[Any]=4,_A : Optional[int]=None,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = parent
SCREAMING_SNAKE_CASE_ : Any = 13
SCREAMING_SNAKE_CASE_ : List[str] = 7
SCREAMING_SNAKE_CASE_ : Dict = True
SCREAMING_SNAKE_CASE_ : Optional[Any] = True
SCREAMING_SNAKE_CASE_ : Tuple = True
SCREAMING_SNAKE_CASE_ : List[str] = True
SCREAMING_SNAKE_CASE_ : List[str] = 99
SCREAMING_SNAKE_CASE_ : Tuple = 384
SCREAMING_SNAKE_CASE_ : Optional[Any] = 2
SCREAMING_SNAKE_CASE_ : Any = 4
SCREAMING_SNAKE_CASE_ : str = 37
SCREAMING_SNAKE_CASE_ : Optional[Any] = "gelu"
SCREAMING_SNAKE_CASE_ : List[Any] = 0.1
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0.1
SCREAMING_SNAKE_CASE_ : Dict = 512
SCREAMING_SNAKE_CASE_ : int = 16
SCREAMING_SNAKE_CASE_ : Optional[int] = 2
SCREAMING_SNAKE_CASE_ : Any = 0.02
SCREAMING_SNAKE_CASE_ : str = 3
SCREAMING_SNAKE_CASE_ : int = 4
SCREAMING_SNAKE_CASE_ : Dict = 128
SCREAMING_SNAKE_CASE_ : Any = 2
SCREAMING_SNAKE_CASE_ : Tuple = 9
SCREAMING_SNAKE_CASE_ : List[Any] = 1
SCREAMING_SNAKE_CASE_ : Any = None
    def prepare_config_and_inputs(self):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
SCREAMING_SNAKE_CASE_ : Any = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ : List[str] = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE_ : List[Any] = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
SCREAMING_SNAKE_CASE_ : Dict = None
SCREAMING_SNAKE_CASE_ : Dict = None
SCREAMING_SNAKE_CASE_ : str = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size],self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([self.batch_size, self.seq_length],self.num_labels )
SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size],self.num_choices )
SCREAMING_SNAKE_CASE_ : Any = ConvBertConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,initializer_range=self.initializer_range,return_dict=_A,)
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase ( self : Optional[int],_A : List[Any],_A : int,_A : Tuple,_A : Optional[int],_A : Union[str, Any],_A : Union[str, Any],_A : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = TFConvBertModel(config=_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
SCREAMING_SNAKE_CASE_ : str = [input_ids, input_mask]
SCREAMING_SNAKE_CASE_ : List[str] = model(_A )
SCREAMING_SNAKE_CASE_ : Dict = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self : Dict,_A : Dict,_A : int,_A : Union[str, Any],_A : List[Any],_A : int,_A : str,_A : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = TFConvBertForMaskedLM(config=_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE_ : List[Any] = model(_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self : Any,_A : Optional[int],_A : List[Any],_A : Union[str, Any],_A : List[Any],_A : Union[str, Any],_A : Optional[int],_A : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self.num_labels
SCREAMING_SNAKE_CASE_ : Any = TFConvBertForSequenceClassification(config=_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : int,_A : int,_A : Dict,_A : List[str],_A : Tuple,_A : Dict,_A : Optional[int],_A : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.num_choices
SCREAMING_SNAKE_CASE_ : Optional[int] = TFConvBertForMultipleChoice(config=_A )
SCREAMING_SNAKE_CASE_ : Any = tf.tile(tf.expand_dims(_A,1 ),(1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE_ : Any = tf.tile(tf.expand_dims(_A,1 ),(1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.tile(tf.expand_dims(_A,1 ),(1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE_ : int = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
SCREAMING_SNAKE_CASE_ : int = model(_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_choices) )
def __UpperCamelCase ( self : List[Any],_A : Union[str, Any],_A : int,_A : Optional[int],_A : str,_A : str,_A : Tuple,_A : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.num_labels
SCREAMING_SNAKE_CASE_ : Union[str, Any] = TFConvBertForTokenClassification(config=_A )
SCREAMING_SNAKE_CASE_ : Tuple = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE_ : str = model(_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase ( self : List[Any],_A : int,_A : List[str],_A : List[Any],_A : Any,_A : Optional[int],_A : List[str],_A : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = TFConvBertForQuestionAnswering(config=_A )
SCREAMING_SNAKE_CASE_ : Dict = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE_ : Any = model(_A )
self.parent.assertEqual(result.start_logits.shape,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape,(self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_A )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_A )
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_A )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_A )
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_A )
@slow
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : List[str] = True
SCREAMING_SNAKE_CASE_ : Any = True
if hasattr(_A,"use_cache" ):
SCREAMING_SNAKE_CASE_ : List[Any] = True
SCREAMING_SNAKE_CASE_ : int = getattr(self.model_tester,"encoder_seq_length",self.model_tester.seq_length )
SCREAMING_SNAKE_CASE_ : Optional[Any] = getattr(self.model_tester,"key_length",_A )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : List[str] = self._prepare_for_class(_A,_A )
SCREAMING_SNAKE_CASE_ : List[Any] = model_class(_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = len(model(_A ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_A,saved_model=_A )
SCREAMING_SNAKE_CASE_ : Optional[Any] = os.path.join(_A,"saved_model","1" )
SCREAMING_SNAKE_CASE_ : Tuple = tf.keras.models.load_model(_A )
SCREAMING_SNAKE_CASE_ : str = model(_A )
if self.is_encoder_decoder:
SCREAMING_SNAKE_CASE_ : Optional[Any] = outputs["encoder_hidden_states"]
SCREAMING_SNAKE_CASE_ : str = outputs["encoder_attentions"]
else:
SCREAMING_SNAKE_CASE_ : Any = outputs["hidden_states"]
SCREAMING_SNAKE_CASE_ : List[str] = outputs["attentions"]
self.assertEqual(len(_A ),_A )
SCREAMING_SNAKE_CASE_ : Any = getattr(
self.model_tester,"expected_num_hidden_layers",self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_A ),_A )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ),[self.model_tester.seq_length, self.model_tester.hidden_size],)
self.assertEqual(len(_A ),self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],)
@slow
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
self.assertIsNotNone(_A )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : List[str] = True
SCREAMING_SNAKE_CASE_ : List[str] = getattr(self.model_tester,"decoder_seq_length",self.model_tester.seq_length )
SCREAMING_SNAKE_CASE_ : Any = getattr(self.model_tester,"encoder_seq_length",self.model_tester.seq_length )
SCREAMING_SNAKE_CASE_ : Optional[int] = getattr(self.model_tester,"key_length",_A )
SCREAMING_SNAKE_CASE_ : int = getattr(self.model_tester,"key_length",_A )
def check_decoder_attentions_output(_A : Dict ):
SCREAMING_SNAKE_CASE_ : int = len(_A )
self.assertEqual(out_len % 2,0 )
SCREAMING_SNAKE_CASE_ : Tuple = outputs.decoder_attentions
self.assertEqual(len(_A ),self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],)
def check_encoder_attentions_output(_A : Tuple ):
SCREAMING_SNAKE_CASE_ : int = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(_A ),self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],)
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : Optional[Any] = True
SCREAMING_SNAKE_CASE_ : Optional[Any] = False
SCREAMING_SNAKE_CASE_ : Tuple = model_class(_A )
SCREAMING_SNAKE_CASE_ : Any = model(self._prepare_for_class(_A,_A ) )
SCREAMING_SNAKE_CASE_ : Tuple = len(_A )
self.assertEqual(config.output_hidden_states,_A )
check_encoder_attentions_output(_A )
if self.is_encoder_decoder:
SCREAMING_SNAKE_CASE_ : Optional[Any] = model_class(_A )
SCREAMING_SNAKE_CASE_ : int = model(self._prepare_for_class(_A,_A ) )
self.assertEqual(config.output_hidden_states,_A )
check_decoder_attentions_output(_A )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE_ : str = True
SCREAMING_SNAKE_CASE_ : int = model_class(_A )
SCREAMING_SNAKE_CASE_ : List[str] = model(self._prepare_for_class(_A,_A ) )
self.assertEqual(config.output_hidden_states,_A )
check_encoder_attentions_output(_A )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE_ : str = True
SCREAMING_SNAKE_CASE_ : int = True
SCREAMING_SNAKE_CASE_ : Dict = model_class(_A )
SCREAMING_SNAKE_CASE_ : str = model(self._prepare_for_class(_A,_A ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1),len(_A ) )
self.assertEqual(model.config.output_hidden_states,_A )
check_encoder_attentions_output(_A )
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
@slow
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
SCREAMING_SNAKE_CASE_ : int = tf.constant([[0, 1, 2, 3, 4, 5]] )
SCREAMING_SNAKE_CASE_ : Tuple = model(_A )[0]
SCREAMING_SNAKE_CASE_ : List[Any] = [1, 6, 768]
self.assertEqual(output.shape,_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.constant(
[
[
[-0.03475493, -0.4686034, -0.30638832],
[0.22637248, -0.26988646, -0.7423424],
[0.10324868, -0.45013508, -0.58280784],
]
] )
tf.debugging.assert_near(output[:, :3, :3],_A,atol=1E-4 )
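# Note on the `num_attention_heads / 2` shape expectations in the tests above:
# ConvBERT splits its heads between ordinary self-attention and span-based
# dynamic convolution (controlled by the config's `head_ratio`, 2 by default),
# so only half of the configured heads appear in the returned attention
# tensors. A quick way to inspect the ratio from the config alone (sketch):
#
#     from transformers import ConvBertConfig
#     cfg = ConvBertConfig()
#     print(cfg.num_attention_heads, cfg.head_ratio)  # e.g. 12, 2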
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_mae"] = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
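# Usage sketch: thanks to _LazyModule, importing this package is cheap and the
# heavy submodules are only loaded on first attribute access (assuming this
# file lives at transformers/models/vit_mae/__init__.py):
#
#     from transformers.models.vit_mae import ViTMAEConfig   # instant
#     from transformers.models.vit_mae import ViTMAEModel    # triggers the torch import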
import sys
import webbrowser

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    print("Googling.....")
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    res = requests.get(url, headers={"User-Agent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(10_000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]
    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get("href"))
        else:
            webbrowser.open(f"https://google.com{link.get('href')}")
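# A more robust way to build the query (sketch): let requests handle the URL
# encoding instead of concatenating raw argv into the path.
#
#     res = requests.get(
#         "https://www.google.com/search",
#         params={"q": " ".join(sys.argv[1:])},
#         headers={"User-Agent": UserAgent().random},
#     )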
import argparse
from pathlib import Path

from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration


def consolidate(
    model_type: str,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    """Build a RAG checkpoint from a question encoder and a generator and save it."""
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path
    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration
    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)
    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config
    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)
    # Sanity check.
    model_class.from_pretrained(dest_dir)
    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
if __name__ == "__main__":
__lowerCamelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''',
choices=['''rag_sequence''', '''rag_token'''],
required=True,
type=str,
help='''RAG model type: rag_sequence, rag_token''',
)
parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''')
parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''')
parser.add_argument(
'''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier'''
)
parser.add_argument(
'''--generator_tokenizer_name_or_path''',
type=str,
help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''',
)
parser.add_argument(
'''--question_encoder_tokenizer_name_or_path''',
type=str,
help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''',
)
parser.add_argument(
'''--config_name_or_path''',
type=str,
help=(
'''Identifier of the model config to use, if not provided, resolves to a base config for a given'''
''' ``model_type``'''
),
)
    args = parser.parse_args()
    dest_dir = Path(args.dest)
    dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
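# Example invocation (hypothetical script name, model identifiers and output
# path; any DPR question encoder plus a BART/T5 generator should work):
#
#     python consolidate_rag_checkpoint.py \
#         --model_type rag_sequence \
#         --generator_name_or_path facebook/bart-large-cnn \
#         --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#         --dest ./rag-sequence-consolidated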
import unittest

import numpy as np
import torch

from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class PNDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        """A tiny randomly initialized UNet for fast tests."""
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
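# Minimal end-to-end usage outside the test harness (a sketch; assumes the
# "google/ddpm-cifar10-32" weights referenced above can be downloaded):
#
#     unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
#     pipe = PNDMPipeline(unet=unet, scheduler=PNDMScheduler())
#     image = pipe(num_inference_steps=50, output_type="pil").images[0]
#     image.save("pndm_sample.png")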
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }
        self.assertEqual(flatten_dict(input_dict), expected_dict)

    def test_transpose(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
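# The helpers exercised above are framework-agnostic: each one inspects the
# input's type and dispatches to the matching numpy / torch / tf / jax
# primitive, so mixed-backend code can share a single call site. Sketch:
#
#     x = np.random.randn(3, 4)
#     transpose(x)                 # numpy path
#     transpose(torch.tensor(x))   # torch path, identical semantics
#     reshape(x, (4, 3)); squeeze(x[None]); expand_dims(x, axis=1)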
def solution(n: int = 100) -> int:
    """Project Euler 6: difference between the square of the sum and the sum
    of the squares of the first n natural numbers."""
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
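# Closed-form alternative using sum(i) = n(n+1)/2 and sum(i^2) = n(n+1)(2n+1)/6
# (a sketch; integer division keeps everything exact):
def solution_closed_form(n: int = 100) -> int:
    sum_of_ints = n * (n + 1) // 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_of_ints**2 - sum_of_squares


assert solution_closed_form() == solution()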
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''allenai/longformer-base-4096''': 40_96,
'''allenai/longformer-large-4096''': 40_96,
'''allenai/longformer-large-4096-finetuned-triviaqa''': 40_96,
'''allenai/longformer-base-4096-extra.pos.embd.only''': 40_96,
'''allenai/longformer-large-4096-extra.pos.embd.only''': 40_96,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """Map every byte to a printable unicode character so that BPE operates on
    reversible strings free of whitespace and control characters."""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word, where the word is
    represented as a tuple of symbols (variable-length strings)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
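# For instance, get_pairs(("h", "e", "ll", "o")) returns
# {("h", "e"), ("e", "ll"), ("ll", "o")}, the candidate merges that BPE ranks.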
class LongformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self : Union[str, Any],_A : List[Any],_A : Tuple,_A : str="replace",_A : Optional[int]="<s>",_A : Dict="</s>",_A : Any="</s>",_A : Optional[Any]="<s>",_A : Union[str, Any]="<unk>",_A : int="<pad>",_A : Dict="<mask>",_A : int=False,**_A : Dict,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else bos_token
SCREAMING_SNAKE_CASE_ : Optional[int] = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else eos_token
SCREAMING_SNAKE_CASE_ : str = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else sep_token
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else cls_token
SCREAMING_SNAKE_CASE_ : List[str] = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else unk_token
SCREAMING_SNAKE_CASE_ : Optional[Any] = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE_ : Dict = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else mask_token
super().__init__(
errors=_A,bos_token=_A,eos_token=_A,unk_token=_A,sep_token=_A,cls_token=_A,pad_token=_A,mask_token=_A,add_prefix_space=_A,**_A,)
with open(_A,encoding="utf-8" ) as vocab_handle:
SCREAMING_SNAKE_CASE_ : Tuple = json.load(_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE_ : Any = errors # how to handle errors in decoding
SCREAMING_SNAKE_CASE_ : Optional[Any] = bytes_to_unicode()
SCREAMING_SNAKE_CASE_ : str = {v: k for k, v in self.byte_encoder.items()}
with open(_A,encoding="utf-8" ) as merges_handle:
SCREAMING_SNAKE_CASE_ : int = merges_handle.read().split("\n" )[1:-1]
SCREAMING_SNAKE_CASE_ : List[str] = [tuple(merge.split() ) for merge in bpe_merges]
SCREAMING_SNAKE_CASE_ : Optional[int] = dict(zip(_A,range(len(_A ) ) ) )
SCREAMING_SNAKE_CASE_ : Any = {}
SCREAMING_SNAKE_CASE_ : List[str] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
SCREAMING_SNAKE_CASE_ : List[Any] = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
return len(self.encoder )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
return dict(self.encoder,**self.added_tokens_encoder )
def __UpperCamelCase ( self : Any,_A : int ):
"""simple docstring"""
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tuple(_A )
SCREAMING_SNAKE_CASE_ : str = get_pairs(_A )
if not pairs:
return token
while True:
SCREAMING_SNAKE_CASE_ : Tuple = min(_A,key=lambda _A : self.bpe_ranks.get(_A,float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = bigram
SCREAMING_SNAKE_CASE_ : int = []
SCREAMING_SNAKE_CASE_ : Dict = 0
while i < len(_A ):
try:
SCREAMING_SNAKE_CASE_ : Tuple = word.index(_A,_A )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
SCREAMING_SNAKE_CASE_ : str = j
if word[i] == first and i < len(_A ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
SCREAMING_SNAKE_CASE_ : Dict = tuple(_A )
SCREAMING_SNAKE_CASE_ : List[str] = new_word
if len(_A ) == 1:
break
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = get_pairs(_A )
SCREAMING_SNAKE_CASE_ : List[str] = " ".join(_A )
SCREAMING_SNAKE_CASE_ : Any = word
return word
def __UpperCamelCase ( self : Dict,_A : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = []
for token in re.findall(self.pat,_A ):
SCREAMING_SNAKE_CASE_ : Any = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_A ).split(" " ) )
return bpe_tokens
def __UpperCamelCase ( self : Optional[int],_A : str ):
"""simple docstring"""
return self.encoder.get(_A,self.encoder.get(self.unk_token ) )
def __UpperCamelCase ( self : Tuple,_A : str ):
"""simple docstring"""
return self.decoder.get(_A )
def __UpperCamelCase ( self : List[str],_A : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = "".join(_A )
SCREAMING_SNAKE_CASE_ : Tuple = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8",errors=self.errors )
return text
def __UpperCamelCase ( self : List[Any],_A : str,_A : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(_A ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
SCREAMING_SNAKE_CASE_ : Tuple = os.path.join(
_A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
SCREAMING_SNAKE_CASE_ : Any = os.path.join(
_A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(_A,"w",encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder,indent=2,sort_keys=_A,ensure_ascii=_A ) + "\n" )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0
with open(_A,"w",encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items(),key=lambda _A : kv[1] ):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
" Please check that the tokenizer is not corrupted!" )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = token_index
writer.write(" ".join(_A ) + "\n" )
index += 1
return vocab_file, merge_file
def __UpperCamelCase ( self : Optional[Any],_A : List[int],_A : Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : str = [self.cls_token_id]
SCREAMING_SNAKE_CASE_ : Tuple = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __UpperCamelCase ( self : str,_A : List[int],_A : Optional[List[int]] = None,_A : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A,token_ids_a=_A,already_has_special_tokens=_A )
if token_ids_a is None:
return [1] + ([0] * len(_A )) + [1]
return [1] + ([0] * len(_A )) + [1, 1] + ([0] * len(_A )) + [1]
def __UpperCamelCase ( self : Any,_A : List[int],_A : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCamelCase ( self : Any,_A : Union[str, Any],_A : Any=False,**_A : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = kwargs.pop("add_prefix_space",self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_A ) > 0 and not text[0].isspace()):
SCREAMING_SNAKE_CASE_ : str = " " + text
return (text, kwargs)
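# Usage sketch (assumes the "allenai/longformer-base-4096" vocab and merges
# files referenced above are downloadable):
#
#     from transformers import LongformerTokenizer
#     tok = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
#     ids = tok("Hello world").input_ids  # wrapped as <s> ... </s> by build_inputs_with_special_tokens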
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig(PretrainedConfig):
    model_type = "perceiver"
def __init__( self : List[Any],_A : Tuple=256,_A : str=1280,_A : List[Any]=768,_A : Union[str, Any]=1,_A : Union[str, Any]=26,_A : List[str]=8,_A : List[Any]=8,_A : List[Any]=None,_A : List[Any]=None,_A : Union[str, Any]="kv",_A : Any=1,_A : int=1,_A : Dict="gelu",_A : Any=0.1,_A : int=0.02,_A : int=1E-12,_A : Any=True,_A : Optional[Any]=262,_A : List[Any]=2048,_A : str=56,_A : Optional[int]=[368, 496],_A : Dict=16,_A : Tuple=1920,_A : List[Any]=16,_A : str=[1, 16, 224, 224],**_A : Optional[Any],):
"""simple docstring"""
super().__init__(**_A )
SCREAMING_SNAKE_CASE_ : Dict = num_latents
SCREAMING_SNAKE_CASE_ : List[Any] = d_latents
SCREAMING_SNAKE_CASE_ : Union[str, Any] = d_model
SCREAMING_SNAKE_CASE_ : Optional[int] = num_blocks
SCREAMING_SNAKE_CASE_ : List[Any] = num_self_attends_per_block
SCREAMING_SNAKE_CASE_ : Tuple = num_self_attention_heads
SCREAMING_SNAKE_CASE_ : List[str] = num_cross_attention_heads
SCREAMING_SNAKE_CASE_ : List[Any] = qk_channels
SCREAMING_SNAKE_CASE_ : Any = v_channels
SCREAMING_SNAKE_CASE_ : Any = cross_attention_shape_for_attention
SCREAMING_SNAKE_CASE_ : List[str] = self_attention_widening_factor
SCREAMING_SNAKE_CASE_ : Any = cross_attention_widening_factor
SCREAMING_SNAKE_CASE_ : List[Any] = hidden_act
SCREAMING_SNAKE_CASE_ : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Any = initializer_range
SCREAMING_SNAKE_CASE_ : List[Any] = layer_norm_eps
SCREAMING_SNAKE_CASE_ : Tuple = use_query_residual
# masked language modeling attributes
SCREAMING_SNAKE_CASE_ : List[str] = vocab_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = max_position_embeddings
# image classification attributes
SCREAMING_SNAKE_CASE_ : Dict = image_size
# flow attributes
SCREAMING_SNAKE_CASE_ : List[Any] = train_size
# multimodal autoencoding attributes
SCREAMING_SNAKE_CASE_ : str = num_frames
SCREAMING_SNAKE_CASE_ : Any = audio_samples_per_frame
SCREAMING_SNAKE_CASE_ : Tuple = samples_per_patch
SCREAMING_SNAKE_CASE_ : Optional[Any] = output_shape
class PerceiverOnnxConfig(OnnxConfig):
@property
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE_ : List[str] = {0: "batch", 1: "choice", 2: "sequence"}
else:
SCREAMING_SNAKE_CASE_ : str = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("inputs", dynamic_axis),
("attention_mask", dynamic_axis),
] )
@property
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
return 1E-4
def __UpperCamelCase ( self : List[str],_A : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],_A : int = -1,_A : int = -1,_A : int = -1,_A : bool = False,_A : Optional[TensorType] = None,_A : int = 3,_A : int = 40,_A : int = 40,):
"""simple docstring"""
if isinstance(_A,_A ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE_ : Tuple = compute_effective_axis_dimension(
_A,fixed_dimension=OnnxConfig.default_fixed_batch,num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE_ : Tuple = preprocessor.num_special_tokens_to_add(_A )
SCREAMING_SNAKE_CASE_ : Any = compute_effective_axis_dimension(
_A,fixed_dimension=OnnxConfig.default_fixed_sequence,num_token_to_add=_A )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE_ : Optional[Any] = [" ".join(["a"] ) * seq_length] * batch_size
SCREAMING_SNAKE_CASE_ : str = dict(preprocessor(_A,return_tensors=_A ) )
SCREAMING_SNAKE_CASE_ : List[str] = inputs.pop("input_ids" )
return inputs
elif isinstance(_A,_A ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE_ : Any = compute_effective_axis_dimension(_A,fixed_dimension=OnnxConfig.default_fixed_batch )
SCREAMING_SNAKE_CASE_ : Optional[int] = self._generate_dummy_images(_A,_A,_A,_A )
SCREAMING_SNAKE_CASE_ : Any = dict(preprocessor(images=_A,return_tensors=_A ) )
SCREAMING_SNAKE_CASE_ : Any = inputs.pop("pixel_values" )
return inputs
else:
raise ValueError(
"Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor." )
| 18 | from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class a__ :
def __init__( self : Optional[int],_A : Dict,_A : List[str]=13,_A : List[str]=7,_A : int=True,_A : str=True,_A : Union[str, Any]=True,_A : Tuple=True,_A : Dict=99,_A : Tuple=32,_A : Tuple=2,_A : Tuple=4,_A : Optional[Any]=37,_A : str="gelu",_A : Dict=0.1,_A : List[Any]=0.1,_A : List[str]=512,_A : str=16,_A : int=2,_A : Dict=0.02,_A : List[Any]=3,_A : Optional[Any]=4,_A : Optional[int]=None,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = parent
SCREAMING_SNAKE_CASE_ : Any = 13
SCREAMING_SNAKE_CASE_ : List[str] = 7
SCREAMING_SNAKE_CASE_ : Dict = True
SCREAMING_SNAKE_CASE_ : Optional[Any] = True
SCREAMING_SNAKE_CASE_ : Tuple = True
SCREAMING_SNAKE_CASE_ : List[str] = True
SCREAMING_SNAKE_CASE_ : List[str] = 99
SCREAMING_SNAKE_CASE_ : Tuple = 384
SCREAMING_SNAKE_CASE_ : Optional[Any] = 2
SCREAMING_SNAKE_CASE_ : Any = 4
SCREAMING_SNAKE_CASE_ : str = 37
SCREAMING_SNAKE_CASE_ : Optional[Any] = "gelu"
SCREAMING_SNAKE_CASE_ : List[Any] = 0.1
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0.1
SCREAMING_SNAKE_CASE_ : Dict = 512
SCREAMING_SNAKE_CASE_ : int = 16
SCREAMING_SNAKE_CASE_ : Optional[int] = 2
SCREAMING_SNAKE_CASE_ : Any = 0.02
SCREAMING_SNAKE_CASE_ : str = 3
SCREAMING_SNAKE_CASE_ : int = 4
SCREAMING_SNAKE_CASE_ : Dict = 128
SCREAMING_SNAKE_CASE_ : Any = 2
SCREAMING_SNAKE_CASE_ : Tuple = 9
SCREAMING_SNAKE_CASE_ : List[Any] = 1
SCREAMING_SNAKE_CASE_ : Any = None
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
SCREAMING_SNAKE_CASE_ : Any = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ : List[str] = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE_ : List[Any] = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
SCREAMING_SNAKE_CASE_ : Dict = None
SCREAMING_SNAKE_CASE_ : Dict = None
SCREAMING_SNAKE_CASE_ : str = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size],self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([self.batch_size, self.seq_length],self.num_labels )
SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size],self.num_choices )
SCREAMING_SNAKE_CASE_ : Any = ConvBertConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,initializer_range=self.initializer_range,return_dict=_A,)
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase ( self : Optional[int],_A : List[Any],_A : int,_A : Tuple,_A : Optional[int],_A : Union[str, Any],_A : Union[str, Any],_A : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = TFConvBertModel(config=_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
SCREAMING_SNAKE_CASE_ : str = [input_ids, input_mask]
SCREAMING_SNAKE_CASE_ : List[str] = model(_A )
SCREAMING_SNAKE_CASE_ : Dict = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self : Dict,_A : Dict,_A : int,_A : Union[str, Any],_A : List[Any],_A : int,_A : str,_A : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = TFConvBertForMaskedLM(config=_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE_ : List[Any] = model(_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self : Any,_A : Optional[int],_A : List[Any],_A : Union[str, Any],_A : List[Any],_A : Union[str, Any],_A : Optional[int],_A : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self.num_labels
SCREAMING_SNAKE_CASE_ : Any = TFConvBertForSequenceClassification(config=_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : int,_A : int,_A : Dict,_A : List[str],_A : Tuple,_A : Dict,_A : Optional[int],_A : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.num_choices
SCREAMING_SNAKE_CASE_ : Optional[int] = TFConvBertForMultipleChoice(config=_A )
SCREAMING_SNAKE_CASE_ : Any = tf.tile(tf.expand_dims(_A,1 ),(1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE_ : Any = tf.tile(tf.expand_dims(_A,1 ),(1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.tile(tf.expand_dims(_A,1 ),(1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE_ : int = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
SCREAMING_SNAKE_CASE_ : int = model(_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_choices) )
def __UpperCamelCase ( self : List[Any],_A : Union[str, Any],_A : int,_A : Optional[int],_A : str,_A : str,_A : Tuple,_A : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.num_labels
SCREAMING_SNAKE_CASE_ : Union[str, Any] = TFConvBertForTokenClassification(config=_A )
SCREAMING_SNAKE_CASE_ : Tuple = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE_ : str = model(_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase ( self : List[Any],_A : int,_A : List[str],_A : List[Any],_A : Any,_A : Optional[int],_A : List[str],_A : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = TFConvBertForQuestionAnswering(config=_A )
SCREAMING_SNAKE_CASE_ : Dict = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE_ : Any = model(_A )
self.parent.assertEqual(result.start_logits.shape,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape,(self.batch_size, self.seq_length) )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) ,
) : List[Any] = config_and_inputs
SCREAMING_SNAKE_CASE_ : Any = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class a__ ( A__ , A__ , unittest.TestCase ):
A = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
A = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
A = False
A = False
A = False
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = TFConvBertModelTester(self )
SCREAMING_SNAKE_CASE_ : Tuple = ConfigTester(self,config_class=_A,hidden_size=37 )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_A )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_A )
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_A )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_A )
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_A )
@slow
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : List[str] = True
SCREAMING_SNAKE_CASE_ : Any = True
if hasattr(_A,"use_cache" ):
SCREAMING_SNAKE_CASE_ : List[Any] = True
SCREAMING_SNAKE_CASE_ : int = getattr(self.model_tester,"encoder_seq_length",self.model_tester.seq_length )
SCREAMING_SNAKE_CASE_ : Optional[Any] = getattr(self.model_tester,"key_length",_A )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : List[str] = self._prepare_for_class(_A,_A )
SCREAMING_SNAKE_CASE_ : List[Any] = model_class(_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = len(model(_A ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_A,saved_model=_A )
SCREAMING_SNAKE_CASE_ : Optional[Any] = os.path.join(_A,"saved_model","1" )
SCREAMING_SNAKE_CASE_ : Tuple = tf.keras.models.load_model(_A )
SCREAMING_SNAKE_CASE_ : str = model(_A )
if self.is_encoder_decoder:
SCREAMING_SNAKE_CASE_ : Optional[Any] = outputs["encoder_hidden_states"]
SCREAMING_SNAKE_CASE_ : str = outputs["encoder_attentions"]
else:
SCREAMING_SNAKE_CASE_ : Any = outputs["hidden_states"]
SCREAMING_SNAKE_CASE_ : List[str] = outputs["attentions"]
self.assertEqual(len(_A ),_A )
SCREAMING_SNAKE_CASE_ : Any = getattr(
self.model_tester,"expected_num_hidden_layers",self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_A ),_A )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ),[self.model_tester.seq_length, self.model_tester.hidden_size],)
self.assertEqual(len(_A ),self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],)
@slow
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
        self.assertIsNotNone(model )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester,"decoder_seq_length",self.model_tester.seq_length )
        encoder_seq_length = getattr(self.model_tester,"encoder_seq_length",self.model_tester.seq_length )
        decoder_key_length = getattr(self.model_tester,"key_length",decoder_seq_length )
        encoder_key_length = getattr(self.model_tester,"key_length",encoder_seq_length )
        def check_decoder_attentions_output(outputs ):
            out_len = len(outputs )
            self.assertEqual(out_len % 2,0 )
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions ),self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],)
        def check_encoder_attentions_output(outputs ):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions ),self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],)
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict,model_class ) )
            out_len = len(outputs )
            self.assertEqual(config.output_hidden_states,False )
            check_encoder_attentions_output(outputs )
            if self.is_encoder_decoder:
                model = model_class(config )
                outputs = model(self._prepare_for_class(inputs_dict,model_class ) )
                self.assertEqual(config.output_hidden_states,False )
                check_decoder_attentions_output(outputs )
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict,model_class ) )
            self.assertEqual(config.output_hidden_states,False )
            check_encoder_attentions_output(outputs )
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict,model_class ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1),len(outputs ) )
            self.assertEqual(model.config.output_hidden_states,True )
            check_encoder_attentions_output(outputs )
@require_tf
class a__ ( unittest.TestCase ):
@slow
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape,expected_shape )
        expected_slice = tf.constant(
[
[
[-0.03475493, -0.4686034, -0.30638832],
[0.22637248, -0.26988646, -0.7423424],
[0.10324868, -0.45013508, -0.58280784],
]
] )
        tf.debugging.assert_near(output[:, :3, :3],expected_slice,atol=1E-4 )
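# (hedged aside, not part of the original test file) The 3x3 slice check above
# is the standard integration-test pattern: pinning a handful of reference
# logits with atol=1e-4 catches weight-conversion and numerical regressions
# without storing the whole [1, 6, 768] output tensor.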
| 18 | 1 |
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer ( model ):
    """simple docstring"""
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    # "dense_ah_to_h" follows this dump's digit-stripped naming; upstream bloom calls it dense_4h_to_h
    return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
    class LoRALayer ( nn.Module ):
        def __init__( self,module : nn.Module,rank : int ):
            """simple docstring"""
            super().__init__()
            self.module = module
            self.adapter = nn.Sequential(
                nn.Linear(module.in_features,rank,bias=False ),nn.Linear(rank,module.out_features,bias=False ),)
            small_std = (2.0 / (5 * min(module.in_features,module.out_features ))) ** 0.5
            nn.init.normal_(self.adapter[0].weight,std=small_std )
            nn.init.zeros_(self.adapter[1].weight )
            self.adapter.to(module.weight.device )
        def forward( self,input,*args,**kwargs ):
            """simple docstring"""
            return self.module(input,*args,**kwargs ) + self.adapter(input )
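# Hedged usage sketch (illustrative only; the helper name `_demo_lora_layer`
# is not from the original tests): LoRALayer wraps a linear layer so that
# wrapped(x) == base(x) + low_rank_adapter(x), with the adapter initialized
# near zero so training starts from the frozen base behavior.
if is_torch_available():
    def _demo_lora_layer():
        base = nn.Linear(16,16 )
        wrapped = LoRALayer(base,rank=4 )
        x = torch.randn(2,16 )
        return wrapped(x ).shape  # torch.Size([2, 16])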
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class a__ ( unittest.TestCase ):
# We keep the constants inside the init function and model loading inside setUp function
# We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected)
# Therefore here we use only bloom-1b3 to test our module
    model_name = 'bigscience/bloom-1b7'
    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109_6595_5269_2574
    input_text = 'Hello my name is'
    EXPECTED_OUTPUTS = set()
EXPECTED_OUTPUTS.add('Hello my name is John and I am a professional photographer. I' )
EXPECTED_OUTPUTS.add('Hello my name is John.\nI am a friend of your father.\n' )
EXPECTED_OUTPUTS.add('Hello my name is John Doe, I am a student at the University' )
    MAX_NEW_TOKENS = 10
def __UpperCamelCase ( self : int ):
"""simple docstring"""
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name )
class a__ ( A__ ):
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
super().setUp()
# Models and tokenizer
        self.model_fpaa = AutoModelForCausalLM.from_pretrained(
            self.model_name,torch_dtype=torch.floataa,device_map="auto" )
        self.model_abit = AutoModelForCausalLM.from_pretrained(self.model_name,load_in_abit=True,device_map="auto" )
def __UpperCamelCase ( self : str ):
"""simple docstring"""
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = self.model_abit.config
self.assertTrue(hasattr(_A,"quantization_config" ) )
SCREAMING_SNAKE_CASE_ : Any = config.to_dict()
SCREAMING_SNAKE_CASE_ : Optional[Any] = config.to_diff_dict()
SCREAMING_SNAKE_CASE_ : int = config.to_json_string()
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
from bitsandbytes.nn import Paramsabit
SCREAMING_SNAKE_CASE_ : str = self.model_fpaa.get_memory_footprint()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit,self.EXPECTED_RELATIVE_DIFFERENCE )
SCREAMING_SNAKE_CASE_ : Optional[int] = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def __UpperCamelCase ( self : int ):
"""simple docstring"""
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(_A,torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
        encoded_input = self.tokenizer(self.input_text,return_tensors="pt" )
        output_sequences = self.model_abit.generate(input_ids=encoded_input["input_ids"].to(0 ),max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0],skip_special_tokens=_A ),self.EXPECTED_OUTPUTS )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
        quantization_config = BitsAndBytesConfig()
        quantization_config.load_in_abit = True
        model_abit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name,quantization_config=quantization_config,device_map="auto" )
        encoded_input = self.tokenizer(self.input_text,return_tensors="pt" )
        output_sequences = model_abit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0 ),max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0],skip_special_tokens=_A ),self.EXPECTED_OUTPUTS )
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
with self.assertRaises(_A ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(_A )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = BitsAndBytesConfig()
with self.assertRaises(_A ):
SCREAMING_SNAKE_CASE_ : List[Any] = AutoModelForCausalLM.from_pretrained(
self.model_name,quantization_config=_A,load_in_abit=_A,device_map="auto",bnb_abit_quant_type="nf4",)
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
with self.assertRaises(_A ):
# Tries with `str`
self.model_abit.to("cpu" )
with self.assertRaises(_A ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(_A ):
# Tries with a `device`
self.model_abit.to(torch.device("cuda:0" ) )
with self.assertRaises(_A ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(_A ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text,return_tensors="pt" )
SCREAMING_SNAKE_CASE_ : int = self.model_fpaa.to(torch.floataa )
SCREAMING_SNAKE_CASE_ : Optional[int] = self.model_fpaa.generate(input_ids=encoded_input["input_ids"].to(0 ),max_new_tokens=10 )
# Check this does not throw an error
SCREAMING_SNAKE_CASE_ : str = self.model_fpaa.to("cpu" )
# Check this does not throw an error
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.model_fpaa.half()
# Check this does not throw an error
SCREAMING_SNAKE_CASE_ : int = self.model_fpaa.float()
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
        model = AutoModelForSeqaSeqLM.from_pretrained("t5-small",load_in_abit=True,device_map="auto" )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class a__ ( unittest.TestCase ):
@classmethod
def __UpperCamelCase ( cls : str ):
"""simple docstring"""
        cls.model_name = "t5-small"
        cls.dense_act_model_name = "google/flan-t5-small" # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name )
        cls.input_text = "Translate in German: Hello, my dog is cute"
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
from transformers import TaForConditionalGeneration
        modules = TaForConditionalGeneration._keep_in_fpaa_modules
        TaForConditionalGeneration._keep_in_fpaa_modules = None
# test with `t5-small`
SCREAMING_SNAKE_CASE_ : List[Any] = TaForConditionalGeneration.from_pretrained(self.model_name,load_in_abit=_A,device_map="auto" )
SCREAMING_SNAKE_CASE_ : Optional[int] = self.tokenizer(self.input_text,return_tensors="pt" ).to(0 )
SCREAMING_SNAKE_CASE_ : Optional[int] = model.generate(**_A )
# test with `flan-t5-small`
SCREAMING_SNAKE_CASE_ : Union[str, Any] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name,load_in_abit=_A,device_map="auto" )
SCREAMING_SNAKE_CASE_ : Dict = self.tokenizer(self.input_text,return_tensors="pt" ).to(0 )
SCREAMING_SNAKE_CASE_ : Tuple = model.generate(**_A )
        TaForConditionalGeneration._keep_in_fpaa_modules = modules
def __UpperCamelCase ( self : str ):
"""simple docstring"""
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
SCREAMING_SNAKE_CASE_ : Tuple = TaForConditionalGeneration.from_pretrained(self.model_name,load_in_abit=_A,device_map="auto" )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q,bnb.nn.Linearabit ) )
SCREAMING_SNAKE_CASE_ : List[str] = self.tokenizer(self.input_text,return_tensors="pt" ).to(0 )
SCREAMING_SNAKE_CASE_ : Optional[int] = model.generate(**_A )
# test with `flan-t5-small`
SCREAMING_SNAKE_CASE_ : List[str] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name,load_in_abit=_A,device_map="auto" )
SCREAMING_SNAKE_CASE_ : Optional[int] = self.tokenizer(self.input_text,return_tensors="pt" ).to(0 )
SCREAMING_SNAKE_CASE_ : List[str] = model.generate(**_A )
class a__ ( A__ ):
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
super().setUp()
# model_name
        self.model_name = "bigscience/bloom-560m"
        self.seq_to_seq_name = "t5-small"
        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name,load_in_abit=True,device_map="auto" )
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name,load_in_abit=True,device_map="auto" )
        # CausalLM model
        self.model_abit = AutoModelForCausalLM.from_pretrained(self.model_name,load_in_abit=True,device_map="auto" )
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeqaSeqLM.from_pretrained(
            self.seq_to_seq_name,load_in_abit=True,device_map="auto" )
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class a__ ( A__ ):
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
super().setUp()
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
        self.pipe = pipeline(
            "text-generation",model=self.model_name,model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.floataa},max_new_tokens=self.MAX_NEW_TOKENS,)
        # Real second forward pass
        pipeline_output = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]["generated_text"],self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class a__ ( A__ ):
def __UpperCamelCase ( self : str ):
"""simple docstring"""
super().setUp()
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name,load_in_abit=True,device_map="balanced" )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ),{0, 1} )
# Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text,return_tensors="pt" )
        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0 ),max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0],skip_special_tokens=_A ),self.EXPECTED_OUTPUTS )
class a__ ( A__ ):
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
        self.model_name = "facebook/opt-350m"
super().setUp()
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
if version.parse(importlib.metadata.version("bitsandbytes" ) ) < version.parse("0.37.0" ):
return
# Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name,load_in_abit=True )
self.assertEqual(set(model.hf_device_map.values() ),{torch.cuda.current_device()} )
for param in model.parameters():
            param.requires_grad = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(_A ) ):
                module.q_proj = LoRALayer(module.q_proj,rank=16 )
                module.k_proj = LoRALayer(module.k_proj,rank=16 )
                module.v_proj = LoRALayer(module.v_proj,rank=16 )
# Step 3: dummy batch
        batch = self.tokenizer("Test batch ",return_tensors="pt" ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
            out = model.forward(**batch )
out.logits.norm().backward()
for module in model.modules():
            if isinstance(module,LoRALayer ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(_A,nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class a__ ( A__ ):
    model_name = 'gpt2-xl'
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191_8548_5415_2187
| 18 | def binary_recursive ( decimal : int ):
    """simple docstring"""
    decimal = int(decimal )
    if decimal in (0, 1): # Exit cases for the recursion
        return str(decimal )
    div , mod = divmod(decimal , 2 )
    return binary_recursive(div ) + str(mod )
def _snake_case ( lowerCAmelCase : str ):
    """simple docstring"""
    number = str(lowerCAmelCase ).strip()
    if not number:
        raise ValueError("No input value was provided" )
    negative = "-" if number.startswith("-" ) else ""
    number = number.lstrip("-" )
    if not number.isnumeric():
        raise ValueError("Input value is not an integer" )
    return f'{negative}0b{binary_recursive(int(number ) )}'
if __name__ == "__main__":
from doctest import testmod
testmod()
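# Worked example of the recursion above (hedged, illustrative values):
# binary_recursive(10) -> binary_recursive(5) + "0"
#                      -> (binary_recursive(2) + "1") + "0"
#                      -> ((binary_recursive(1) + "0") + "1") + "0" == "1010"
# so _snake_case("10") returns "0b1010" and _snake_case("-10") returns "-0b1010".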
| 18 | 1 |
import math
import unittest
def is_prime ( number : int ):
    """simple docstring"""
    assert isinstance(number , int ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
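# Why the stride of 6 above is enough (hedged explanatory note): every prime
# greater than 3 is congruent to 1 or 5 modulo 6, so once 2 and 3 are ruled
# out it suffices to trial-divide by 6k - 1 and 6k + 1 up to sqrt(number).
# For number = 97, sqrt(97) ~ 9.8, so only the divisors 5 and 7 are tried.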
class a__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
with self.assertRaises(_A ):
is_prime(-19 )
self.assertFalse(
is_prime(0 ),"Zero doesn't have any positive factors, primes must have exactly two.",)
self.assertFalse(
is_prime(1 ),"One only has 1 positive factor, primes must have exactly two.",)
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 18 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__lowerCamelCase : Union[str, Any] = {
'''configuration_chinese_clip''': [
'''CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ChineseCLIPConfig''',
'''ChineseCLIPOnnxConfig''',
'''ChineseCLIPTextConfig''',
'''ChineseCLIPVisionConfig''',
],
'''processing_chinese_clip''': ['''ChineseCLIPProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Any = ['''ChineseCLIPFeatureExtractor''']
__lowerCamelCase : Optional[int] = ['''ChineseCLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : int = [
'''CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ChineseCLIPModel''',
'''ChineseCLIPPreTrainedModel''',
'''ChineseCLIPTextModel''',
'''ChineseCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
__lowerCamelCase : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
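# (hedged note) The _LazyModule above defers the heavy imports: the
# `_import_structure` mapping is resolved attribute-by-attribute, so e.g.
# `from transformers.models.chinese_clip import ChineseCLIPProcessor` only
# triggers the `processing_chinese_clip` import on first access instead of
# at package import time.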
| 18 | 1 |
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset ( dataset , expected_features ):
    """simple docstring"""
    assert isinstance(dataset , Dataset )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def _snake_case ( keep_in_memory , jsonl_path , tmp_path ):
    """simple docstring"""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path , cache_dir=cache_dir , keep_in_memory=keep_in_memory ).read()
    _check_json_dataset(dataset , expected_features )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def _snake_case ( features , jsonl_path , tmp_path ):
    """simple docstring"""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path , features=features , cache_dir=cache_dir ).read()
    _check_json_dataset(dataset , expected_features )
@pytest.mark.parametrize(
"features" , [
None,
{"col_3": "float64", "col_1": "string", "col_2": "int64"},
] , )
def _snake_case ( features , jsonl_312_path , tmp_path ):  # fixture name assumed
    """simple docstring"""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path , features=features , cache_dir=cache_dir ).read()
    assert isinstance(dataset , Dataset )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def _snake_case ( jsonl_251_path , tmp_path ):  # fixture name assumed
    """simple docstring"""
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_251_path , features=features , cache_dir=cache_dir ).read()
    assert isinstance(dataset , Dataset )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def _snake_case ( split , jsonl_path , tmp_path ):
    """simple docstring"""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(jsonl_path , cache_dir=cache_dir , split=split ).read()
    _check_json_dataset(dataset , expected_features )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def _snake_case ( path_type , jsonl_path , tmp_path ):
    """simple docstring"""
    if issubclass(path_type , str ):
        path = jsonl_path
    elif issubclass(path_type , list ):
        path = [jsonl_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path , cache_dir=cache_dir ).read()
    _check_json_dataset(dataset , expected_features )
def _check_json_datasetdict ( dataset_dict , expected_features , splits=("train",) ):
    """simple docstring"""
    assert isinstance(dataset_dict , DatasetDict )
    for split in splits:
        dataset = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def _snake_case ( keep_in_memory , jsonl_path , tmp_path ):
    """simple docstring"""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({"train": jsonl_path} , cache_dir=cache_dir , keep_in_memory=keep_in_memory ).read()
    _check_json_datasetdict(dataset , expected_features )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def _snake_case ( features , jsonl_path , tmp_path ):
    """simple docstring"""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path} , features=features , cache_dir=cache_dir ).read()
    _check_json_datasetdict(dataset , expected_features )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def _snake_case ( split , jsonl_path , tmp_path ):
    """simple docstring"""
    if split:
        path = {split: jsonl_path}
    else:
        split = "train"
        path = {"train": jsonl_path, "test": jsonl_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path , cache_dir=cache_dir ).read()
    _check_json_datasetdict(dataset , expected_features , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def load_json ( buffer ):
    """simple docstring"""
    return json.load(buffer )
def load_json_lines ( buffer ):
    """simple docstring"""
    return [json.loads(line ) for line in buffer]
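# Minimal round-trip sketch for the two loaders above (hedged, standalone
# illustration rather than one of the parametrized cases below):
# buf = io.BytesIO(b'{"a": 1}\n{"a": 2}\n')
# load_json_lines(buf)                 # -> [{"a": 1}, {"a": 2}]
# load_json(io.BytesIO(b'{"a": 1}'))   # -> {"a": 1}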
class a__ :
@pytest.mark.parametrize("lines, load_json_function",[(True, load_json_lines), (False, load_json)] )
    def __UpperCamelCase ( self,lines,load_json_function,dataset ):  # `dataset` fixture name assumed
        """simple docstring"""
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset,buffer,lines=lines ).write()
            buffer.seek(0 )
            exported_content = load_json_function(buffer )
        assert isinstance(exported_content,list )
        assert isinstance(exported_content[0],dict )
        assert len(exported_content ) == 10
@pytest.mark.parametrize(
"orient, container, keys, len_at",[
("records", list, {"tokens", "labels", "answers", "id"}, None),
("split", dict, {"columns", "data"}, "data"),
("index", dict, set("0123456789" ), None),
("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
("values", list, None, None),
("table", dict, {"schema", "data"}, "data"),
],)
    def __UpperCamelCase ( self,orient,container,keys,len_at,dataset ):  # `dataset` fixture name assumed
        """simple docstring"""
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset,buffer,lines=False,orient=orient ).write()
            buffer.seek(0 )
            exported_content = load_json(buffer )
        assert isinstance(exported_content,container )
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content,"keys" ) and not hasattr(exported_content[0],"keys" )
        if len_at:
            assert len(exported_content[len_at] ) == 10
        else:
            assert len(exported_content ) == 10
@pytest.mark.parametrize("lines, load_json_function",[(True, load_json_lines), (False, load_json)] )
    def __UpperCamelCase ( self,lines,load_json_function,dataset ):  # `dataset` fixture name assumed
        """simple docstring"""
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset,buffer,lines=lines,num_proc=2 ).write()
            buffer.seek(0 )
            exported_content = load_json_function(buffer )
        assert isinstance(exported_content,list )
        assert isinstance(exported_content[0],dict )
        assert len(exported_content ) == 10
@pytest.mark.parametrize(
"orient, container, keys, len_at",[
("records", list, {"tokens", "labels", "answers", "id"}, None),
("split", dict, {"columns", "data"}, "data"),
("index", dict, set("0123456789" ), None),
("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
("values", list, None, None),
("table", dict, {"schema", "data"}, "data"),
],)
    def __UpperCamelCase ( self,orient,container,keys,len_at,dataset ):  # `dataset` fixture name assumed
        """simple docstring"""
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset,buffer,lines=False,orient=orient,num_proc=2 ).write()
            buffer.seek(0 )
            exported_content = load_json(buffer )
        assert isinstance(exported_content,container )
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content,"keys" ) and not hasattr(exported_content[0],"keys" )
        if len_at:
            assert len(exported_content[len_at] ) == 10
        else:
            assert len(exported_content ) == 10
    def __UpperCamelCase ( self,dataset ):  # `dataset` fixture name assumed
        """simple docstring"""
        with pytest.raises(ValueError ):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset,buffer,num_proc=0 )
@pytest.mark.parametrize("compression, extension",[("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")] )
    def __UpperCamelCase ( self,shared_datadir,tmp_path_factory,extension,compression,dataset ):  # fixture names assumed
        """simple docstring"""
        path = tmp_path_factory.mktemp("data" ) / F'test.json.{extension}'
        original_path = str(shared_datadir / F'test_file.json.{extension}' )
        JsonDatasetWriter(dataset,path,compression=compression ).write()
        with fsspec.open(path,"rb",compression="infer" ) as f:
            exported_content = f.read()
        with fsspec.open(original_path,"rb",compression="infer" ) as f:
            original_content = f.read()
        assert exported_content == original_content
| 18 | import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
__lowerCamelCase : Any = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def rename_keys ( s_dict ):
    """simple docstring"""
    keys = list(s_dict.keys() )
    for key in keys:
        layer_to_block_pattern = R".*/layers_(\d+)"
        new_key = key
        if re.match(layer_to_block_pattern , key ):
            new_key = re.sub(R"layers_(\d+)" , R"block/\1/layer" , new_key )
        encoder_decoder_pattern = R"(encoder|decoder)\/"
        if re.match(encoder_decoder_pattern , key ):
            groups = re.match(encoder_decoder_pattern , new_key ).groups()
            if groups[0] == "encoder":
                new_key = re.sub(R"/mlp/" , R"/1/mlp/" , new_key )
                new_key = re.sub(R"/pre_mlp_layer_norm/" , R"/1/layer_norm/" , new_key )
            elif groups[0] == "decoder":
                new_key = re.sub(R"/mlp/" , R"/2/mlp/" , new_key )
                new_key = re.sub(R"/pre_mlp_layer_norm/" , R"/2/layer_norm/" , new_key )
        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key , temp_key )
        print(f'{key} -> {new_key}' )
        s_dict[new_key] = s_dict.pop(key )
    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys() ):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weihts = s_dict[key]
            for idx in range(num_experts ):
                s_dict[key.replace("expert/" , f'experts/expert_{idx}/' )] = expert_weihts[idx]
                print(f'{key} -> {key.replace("expert/" , f"experts/expert_{idx}/" )}' )
            s_dict.pop(key )
    return s_dict
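# Worked example of the renaming above (hedged, illustrative key): a T5X key
# "encoder/layers_3/mlp/wi/kernel" first becomes
# "encoder/block/3/layer/mlp/wi/kernel" via the layers_(\d+) substitution,
# then the encoder branch rewrites "/mlp/" to "/1/mlp/", giving
# "encoder/block/3/layer/1/mlp/wi/kernel"; MOE_LAYER_NAME_MAPPING then
# rewrites any remaining attention/router fragments.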
__lowerCamelCase : List[Any] = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def convert_gin_to_config ( gin_file , num_experts ):
    """simple docstring"""
    import regex as re
    with open(gin_file , "r" ) as f:
        raw_gin = f.read()
    regex_match = re.findall(R"(.*) = ([0-9.]*)" , raw_gin )
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value ) if "." in value else int(value )
    activation = re.findall(R"(.*activations) = \(\'(.*)\',\)" , raw_gin )[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1] )
    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args )
    return config
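# Example of what the regexes above extract (hedged sketch): a gin line such
# as "NUM_HEADS = 12" matches (.*) = ([0-9.]*) and, via GIN_TO_CONFIG_MAPPING,
# becomes the config kwarg num_heads=12; the activations line, e.g.
# "dense.MlpBlock.activations = ('gelu',)", sets feed_forward_proj="gelu".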
def convert_flax_checkpoint_to_pytorch ( flax_checkpoint_path , config_file , gin_file=None , pytorch_dump_path="./" , num_experts=8 ):
    """simple docstring"""
    print(f'Loading flax weights from : {flax_checkpoint_path}' )
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path )
    if gin_file is not None:
        config = convert_gin_to_config(gin_file , num_experts )
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file )
    pt_model = SwitchTransformersForConditionalGeneration(config )
    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params , sep="/" )
    flax_params = rename_keys(flax_params )
    flax_params = unflatten_dict(flax_params , sep="/" )
    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model , flax_params )
    print(f'Save PyTorch model to {pytorch_dump_path}' )
    pt_model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
__lowerCamelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'''
''' model architecture. If not provided, a `gin_file` has to be provided.'''
),
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
__lowerCamelCase : Any = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 18 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
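# (hedged note) The try/except above is the usual soft-dependency guard: when
# torch or transformers is missing, the dummy ShapEPipeline import replaces
# the real one so users get a clear "missing dependency" error at call time
# instead of an ImportError when this package is imported.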
| 18 | from math import factorial, radians
def _snake_case ( angle_in_degrees : float , accuracy : int = 1_8 , rounded_values_count : int = 1_0 ):
    """simple docstring"""
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees )
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy ):
        result += (b * (angle_in_radians**a)) / factorial(a )
        b = -b # One positive term and the next will be negative and so on...
        a += 2 # Increased by 2 for every term.
    return round(result , rounded_values_count )
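# Worked check of the series above (hedged): for 30 degrees,
# angle_in_radians ~ 0.5236 and
#   0.5236 - 0.5236**3 / 3! + 0.5236**5 / 5! - ...
# converges to ~0.5, matching sin(30 degrees); the `b = -b` flip supplies the
# alternating signs and `a += 2` walks the odd powers 3, 5, 7, ...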
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 18 | 1 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class a__ ( A__ ):
A = (DPMSolverSinglestepScheduler,)
A = (('num_inference_steps', 25),)
def __UpperCamelCase ( self : Optional[Any],**_A : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = {
"num_train_timesteps": 1000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"solver_order": 2,
"prediction_type": "epsilon",
"thresholding": False,
"sample_max_value": 1.0,
"algorithm_type": "dpmsolver++",
"solver_type": "midpoint",
"lambda_min_clipped": -float("inf" ),
"variance_type": None,
}
config.update(**_A )
return config
    def __UpperCamelCase ( self : str,time_step : Dict=0,**kwargs : Any ):  # param names assumed from the body
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE_ : Tuple = kwargs.pop("num_inference_steps",_A )
SCREAMING_SNAKE_CASE_ : Dict = self.dummy_sample
SCREAMING_SNAKE_CASE_ : Optional[Any] = 0.1 * sample
SCREAMING_SNAKE_CASE_ : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE_ : Tuple = self.get_scheduler_config(**_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = scheduler_class(**_A )
scheduler.set_timesteps(_A )
# copy over dummy past residuals
SCREAMING_SNAKE_CASE_ : Union[str, Any] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = scheduler_class.from_pretrained(_A )
new_scheduler.set_timesteps(_A )
# copy over dummy past residuals
SCREAMING_SNAKE_CASE_ : List[Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
            SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = sample, sample
for t in range(_A,time_step + scheduler.config.solver_order + 1 ):
SCREAMING_SNAKE_CASE_ : str = scheduler.step(_A,_A,_A,**_A ).prev_sample
SCREAMING_SNAKE_CASE_ : Union[str, Any] = new_scheduler.step(_A,_A,_A,**_A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
pass
    def __UpperCamelCase ( self : Any,time_step : List[Any]=0,**kwargs : int ):  # param names assumed from the body
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE_ : Tuple = kwargs.pop("num_inference_steps",_A )
SCREAMING_SNAKE_CASE_ : int = self.dummy_sample
SCREAMING_SNAKE_CASE_ : int = 0.1 * sample
SCREAMING_SNAKE_CASE_ : Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE_ : Tuple = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ : Optional[int] = scheduler_class(**_A )
scheduler.set_timesteps(_A )
# copy over dummy past residuals (must be after setting timesteps)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_A )
SCREAMING_SNAKE_CASE_ : Dict = scheduler_class.from_pretrained(_A )
# copy over dummy past residuals
new_scheduler.set_timesteps(_A )
# copy over dummy past residual (must be after setting timesteps)
SCREAMING_SNAKE_CASE_ : Optional[int] = dummy_past_residuals[: new_scheduler.config.solver_order]
SCREAMING_SNAKE_CASE_ : List[Any] = scheduler.step(_A,_A,_A,**_A ).prev_sample
SCREAMING_SNAKE_CASE_ : Dict = new_scheduler.step(_A,_A,_A,**_A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def __UpperCamelCase ( self : int,scheduler : Dict=None,**config : List[str] ):
"""simple docstring"""
if scheduler is None:
SCREAMING_SNAKE_CASE_ : Dict = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_scheduler_config(**_A )
SCREAMING_SNAKE_CASE_ : int = scheduler_class(**_A )
SCREAMING_SNAKE_CASE_ : Tuple = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ : int = self.get_scheduler_config(**_A )
SCREAMING_SNAKE_CASE_ : str = scheduler_class(**_A )
SCREAMING_SNAKE_CASE_ : List[Any] = 10
SCREAMING_SNAKE_CASE_ : Any = self.dummy_model()
SCREAMING_SNAKE_CASE_ : Optional[int] = self.dummy_sample_deter
scheduler.set_timesteps(_A )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE_ : int = model(_A,_A )
SCREAMING_SNAKE_CASE_ : List[Any] = scheduler.step(_A,_A,_A ).prev_sample
return sample
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 50
SCREAMING_SNAKE_CASE_ : Tuple = self.dummy_model()
SCREAMING_SNAKE_CASE_ : Tuple = self.dummy_sample_deter
scheduler.set_timesteps(_A )
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:] ):
SCREAMING_SNAKE_CASE_ : Any = model(_A,_A )
SCREAMING_SNAKE_CASE_ : Tuple = scheduler.step(_A,_A,_A ).prev_sample
SCREAMING_SNAKE_CASE_ : str = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.2574 ) < 1E-3
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=_A )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
SCREAMING_SNAKE_CASE_ : Tuple = self.full_loop(scheduler=_A )
SCREAMING_SNAKE_CASE_ : List[Any] = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
SCREAMING_SNAKE_CASE_ : Tuple = DEISMultistepScheduler.from_config(scheduler.config )
SCREAMING_SNAKE_CASE_ : str = DPMSolverMultistepScheduler.from_config(scheduler.config )
SCREAMING_SNAKE_CASE_ : List[str] = UniPCMultistepScheduler.from_config(scheduler.config )
SCREAMING_SNAKE_CASE_ : List[str] = DPMSolverSinglestepScheduler.from_config(scheduler.config )
SCREAMING_SNAKE_CASE_ : int = self.full_loop(scheduler=_A )
SCREAMING_SNAKE_CASE_ : int = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
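        # (hedged aside) The round-trip above is the scheduler hot-swap
        # pattern: rebuilding via `.from_config(scheduler.config)` must leave
        # the sampling trajectory unchanged, which is why the same 0.2791
        # mean-abs reference value is asserted before and after cycling
        # through DEIS, DPM-multistep and UniPC configs.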
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
self.check_over_configs(thresholding=_A )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_A,prediction_type=_A,sample_max_value=_A,algorithm_type="dpmsolver++",solver_order=_A,solver_type=_A,)
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_A )
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_A,solver_type=_A,prediction_type=_A,algorithm_type=_A,)
SCREAMING_SNAKE_CASE_ : str = self.full_loop(
solver_order=_A,solver_type=_A,prediction_type=_A,algorithm_type=_A,)
assert not torch.isnan(_A ).any(), "Samples have nan numbers"
def __UpperCamelCase ( self : int ):
"""simple docstring"""
self.check_over_configs(lower_order_final=_A )
self.check_over_configs(lower_order_final=_A )
def __UpperCamelCase ( self : int ):
"""simple docstring"""
self.check_over_configs(lambda_min_clipped=-float("inf" ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
self.check_over_configs(variance_type=_A )
self.check_over_configs(variance_type="learned_range" )
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=_A,time_step=0 )
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = self.full_loop()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = self.full_loop(use_karras_sigmas=_A )
SCREAMING_SNAKE_CASE_ : Any = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.2248 ) < 1E-3
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.full_loop(prediction_type="v_prediction" )
SCREAMING_SNAKE_CASE_ : List[Any] = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.1453 ) < 1E-3
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.full_loop(prediction_type="v_prediction",use_karras_sigmas=_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.0649 ) < 1E-3
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_scheduler_config(thresholding=_A,dynamic_thresholding_ratio=0 )
SCREAMING_SNAKE_CASE_ : Any = scheduler_class(**_A )
SCREAMING_SNAKE_CASE_ : Any = 10
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.dummy_model()
SCREAMING_SNAKE_CASE_ : Dict = self.dummy_sample_deter.half()
scheduler.set_timesteps(_A )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE_ : Any = model(_A,_A )
SCREAMING_SNAKE_CASE_ : Dict = scheduler.step(_A,_A,_A ).prev_sample
assert sample.dtype == torch.floataa
| 18 | from functools import lru_cache
@lru_cache
def factorial ( num : int ):
"""simple docstring"""
if num < 0:
raise ValueError("Number should not be negative." )
return 1 if num in (0, 1) else num * factorial(num - 1 )
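# (hedged note) @lru_cache memoizes each distinct argument, so after one call
# to factorial(12) a later factorial(10) is a cache hit rather than a fresh
# recursion; e.g. factorial(5) evaluates to 120 and caches 1..5 on the way.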
if __name__ == "__main__":
import doctest
doctest.testmod()
| 18 | 1 |
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
"files" , [
["full:README.md", "dataset_infos.json"],
["empty:README.md", "dataset_infos.json"],
["dataset_infos.json"],
["full:README.md"],
] , )
def _snake_case ( lowerCAmelCase : str , lowerCAmelCase : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = tmp_path_factory.mktemp("dset_infos_dir" )
if "full:README.md" in files:
with open(dataset_infos_dir / "README.md" , "w" ) as f:
f.write("---\ndataset_info:\n dataset_size: 42\n---" )
if "empty:README.md" in files:
with open(dataset_infos_dir / "README.md" , "w" ) as f:
f.write("" )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / "dataset_infos.json" , "w" ) as f:
f.write("{\"default\": {\"dataset_size\": 42}}" )
SCREAMING_SNAKE_CASE_ : str = DatasetInfosDict.from_directory(lowerCAmelCase )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 4_2
@pytest.mark.parametrize(
"dataset_info" , [
DatasetInfo(),
DatasetInfo(
description="foo" , features=Features({"a": Value("int32" )} ) , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train"}] , download_size=4_2 , ),
] , )
def _snake_case ( lowerCAmelCase : Optional[Any] , lowerCAmelCase : DatasetInfo ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = str(lowerCAmelCase )
dataset_info.write_to_directory(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[str] = DatasetInfo.from_directory(lowerCAmelCase )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(lowerCAmelCase , "dataset_info.json" ) )
def _snake_case ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = DatasetInfo(
description="foo" , citation="bar" , homepage="https://foo.bar" , license="CC0" , features=Features({"a": Value("int32" )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train", "num_examples": 4_2}] , download_checksums={} , download_size=1_3_3_7 , post_processing_size=4_4_2 , dataset_size=1_2_3_4 , size_in_bytes=1_3_3_7 + 4_4_2 + 1_2_3_4 , )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = dataset_info._to_yaml_dict()
assert sorted(lowerCAmelCase ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
SCREAMING_SNAKE_CASE_ : Optional[Any] = yaml.safe_dump(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] = yaml.safe_load(lowerCAmelCase )
assert dataset_info_yaml_dict == reloaded
def _snake_case ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = DatasetInfo()
SCREAMING_SNAKE_CASE_ : List[str] = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
"dataset_infos_dict" , [
DatasetInfosDict(),
DatasetInfosDict({"default": DatasetInfo()} ),
DatasetInfosDict({"my_config_name": DatasetInfo()} ),
DatasetInfosDict(
{
"default": DatasetInfo(
description="foo" , features=Features({"a": Value("int32" )} ) , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train"}] , download_size=4_2 , )
} ),
DatasetInfosDict(
{
"v1": DatasetInfo(dataset_size=4_2 ),
"v2": DatasetInfo(dataset_size=1_3_3_7 ),
} ),
] , )
def _snake_case ( lowerCAmelCase : Dict , lowerCAmelCase : DatasetInfosDict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = str(lowerCAmelCase )
dataset_infos_dict.write_to_directory(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : int = DatasetInfosDict.from_directory(lowerCAmelCase )
# the config_name of the dataset_infos_dict take over the attribute
for config_name, dataset_info in dataset_infos_dict.items():
SCREAMING_SNAKE_CASE_ : Any = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
SCREAMING_SNAKE_CASE_ : Tuple = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(lowerCAmelCase , "README.md" ) )
| 18 | from collections import defaultdict
def dfs ( start : int ):
    """Return the number of nodes in the subtree rooted at ``start`` and
    record nodes whose subtree has even size (their parent edge can be cut)."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v )
    if ret % 2 == 0:
        cuts.append(start )
    return ret
def even_tree ( ):
    """Run the subtree-size DFS from the root, node 1."""
    dfs(1 )
if __name__ == "__main__":
    number_of_nodes, number_of_edges = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
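    # Sanity check (added illustration, not in the original script): the
    # subtrees rooted at nodes 3 and 6 have even size, and the full tree of
    # 10 nodes is recorded as well, so len(cuts) - 1 == 2 removable edges.
    assert len(cuts) - 1 == 2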
print(len(cuts) - 1)
| 18 | 1 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class a__ ( A__ ):
A = ['image_processor', 'tokenizer']
A = 'ChineseCLIPImageProcessor'
A = ('BertTokenizer', 'BertTokenizerFast')
def __init__( self : Any,_A : str=None,_A : Union[str, Any]=None,**_A : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead.",_A,)
SCREAMING_SNAKE_CASE_ : List[str] = kwargs.pop("feature_extractor" )
SCREAMING_SNAKE_CASE_ : Dict = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(_A,_A )
SCREAMING_SNAKE_CASE_ : List[Any] = self.image_processor
def __call__( self : Dict,_A : Optional[Any]=None,_A : str=None,_A : List[Any]=None,**_A : str ):
"""simple docstring"""
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
SCREAMING_SNAKE_CASE_ : List[str] = self.tokenizer(_A,return_tensors=_A,**_A )
if images is not None:
SCREAMING_SNAKE_CASE_ : Optional[int] = self.image_processor(_A,return_tensors=_A,**_A )
if text is not None and images is not None:
SCREAMING_SNAKE_CASE_ : int = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_A ),tensor_type=_A )
def __UpperCamelCase ( self : Union[str, Any],*_A : str,**_A : List[Any] ):
"""simple docstring"""
return self.tokenizer.batch_decode(*_A,**_A )
def __UpperCamelCase ( self : str,*_A : str,**_A : int ):
"""simple docstring"""
return self.tokenizer.decode(*_A,**_A )
@property
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = self.tokenizer.model_input_names
SCREAMING_SNAKE_CASE_ : str = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def __UpperCamelCase ( self : str ):
"""simple docstring"""
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",_A,)
return self.image_processor_class
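# Hedged usage sketch (added; the checkpoint name is an assumption -- substitute
# a real Chinese-CLIP checkpoint if it differs):
#   from transformers import ChineseCLIPProcessor
#   processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#   inputs = processor(text=["一张猫的照片"], images=image, return_tensors="pt")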
| 18 | # Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def _snake_case ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = ArgumentParser("Accelerate CLI tool" , usage="accelerate <command> [<args>]" , allow_abbrev=lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = parser.add_subparsers(help="accelerate command helpers" )
# Register commands
get_config_parser(subparsers=lowerCAmelCase )
env_command_parser(subparsers=lowerCAmelCase )
launch_command_parser(subparsers=lowerCAmelCase )
tpu_command_parser(subparsers=lowerCAmelCase )
test_command_parser(subparsers=lowerCAmelCase )
# Let's go
SCREAMING_SNAKE_CASE_ : Dict = parser.parse_args()
if not hasattr(lowerCAmelCase , "func" ):
parser.print_help()
exit(1 )
# Run
args.func(lowerCAmelCase )
if __name__ == "__main__":
main()
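# Example invocations of the CLI assembled above (illustrative):
#   accelerate config                      # handled by get_config_parser
#   accelerate env                         # handled by env_command_parser
#   accelerate launch train.py --lr 1e-4   # handled by launch_command_parser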
| 18 | 1 |
from __future__ import annotations
import math
__lowerCamelCase : Tuple = '''2020.9.26'''
__lowerCamelCase : Any = '''xcodz-dot, cclaus, dhruvmanila'''
def convert_to_ad ( x : float , y : float , z : float , scale : float , distance : float ):
    """Project the 3D point (x, y, z) onto a 2D plane with a perspective
    divide: each coordinate is scaled by distance / (z + distance)."""
    if not all(isinstance(val , (float, int) ) for val in locals().values() ):
        msg = f'Input values must either be float or int: {list(locals().values() )}'
        raise TypeError(msg )
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y
def rotate ( x : float , y : float , z : float , axis : str , angle : float ):
    """Rotate the point (x, y, z) about the given axis ('x', 'y' or 'z')."""
    if not isinstance(axis , str ):
        raise TypeError("Axis must be a str" )
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val , (float, int) ) for val in input_variables.values() ):
        msg = (
            "Input values except axis must either be float or int: "
            f'{list(input_variables.values() )}'
        )
        raise TypeError(msg )
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle ) - y * math.sin(angle )
        new_y = y * math.cos(angle ) + x * math.sin(angle )
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle ) - z * math.sin(angle )
        new_z = z * math.cos(angle ) + y * math.sin(angle )
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle ) - z * math.sin(angle )
        new_z = z * math.cos(angle ) + x * math.sin(angle )
        new_y = y
    else:
        raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'" )
    return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }''')
print(f'''{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }''')
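    # Added for symmetry with the demos above (illustrative only):
    print(f'''{rotate(1.0, 2.0, 3.0, 'x', 90.0) = }''')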
| 18 | import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
__lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
__lowerCamelCase : Dict = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__lowerCamelCase : int = {
'''vocab_file''': {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'''
),
}
}
__lowerCamelCase : Any = {
'''junnyu/roformer_chinese_small''': 15_36,
'''junnyu/roformer_chinese_base''': 15_36,
'''junnyu/roformer_chinese_char_small''': 5_12,
'''junnyu/roformer_chinese_char_base''': 5_12,
'''junnyu/roformer_small_discriminator''': 1_28,
'''junnyu/roformer_small_generator''': 1_28,
}
__lowerCamelCase : Union[str, Any] = {
'''junnyu/roformer_chinese_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_base''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_base''': {'''do_lower_case''': True},
'''junnyu/roformer_small_discriminator''': {'''do_lower_case''': True},
'''junnyu/roformer_small_generator''': {'''do_lower_case''': True},
}
class a__ ( A__ ):
A = VOCAB_FILES_NAMES
A = PRETRAINED_VOCAB_FILES_MAP
A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A = PRETRAINED_INIT_CONFIGURATION
A = RoFormerTokenizer
def __init__( self : List[str],_A : int=None,_A : int=None,_A : int=True,_A : List[Any]="[UNK]",_A : Tuple="[SEP]",_A : List[Any]="[PAD]",_A : Optional[int]="[CLS]",_A : Optional[Any]="[MASK]",_A : Optional[int]=True,_A : List[str]=None,**_A : List[Any],):
"""simple docstring"""
super().__init__(
_A,tokenizer_file=_A,do_lower_case=_A,unk_token=_A,sep_token=_A,pad_token=_A,cls_token=_A,mask_token=_A,tokenize_chinese_chars=_A,strip_accents=_A,**_A,)
SCREAMING_SNAKE_CASE_ : Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
pre_tok_state.get("lowercase",_A ) != do_lower_case
or pre_tok_state.get("strip_accents",_A ) != strip_accents
):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = getattr(_A,pre_tok_state.pop("type" ) )
SCREAMING_SNAKE_CASE_ : Any = do_lower_case
SCREAMING_SNAKE_CASE_ : List[str] = strip_accents
SCREAMING_SNAKE_CASE_ : str = pre_tok_class(**_A )
SCREAMING_SNAKE_CASE_ : List[str] = do_lower_case
def __getstate__( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = self.__dict__.copy()
SCREAMING_SNAKE_CASE_ : Optional[Any] = BertPreTokenizer()
return state
def __setstate__( self : List[Any],_A : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = d
SCREAMING_SNAKE_CASE_ : List[str] = self.__dict__["_tokenizer"].get_vocab()
SCREAMING_SNAKE_CASE_ : Any = PreTokenizer.custom(JiebaPreTokenizer(_A ) )
def __UpperCamelCase ( self : Union[str, Any],_A : List[Any],_A : str=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __UpperCamelCase ( self : str,_A : List[int],_A : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCamelCase ( self : int,_A : str,_A : Optional[str] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self._tokenizer.model.save(_A,name=_A )
return tuple(_A )
def __UpperCamelCase ( self : int,_A : Optional[int],_A : List[Any]=None,_A : Tuple=None,_A : str=False,**_A : List[Any],):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = BertPreTokenizer()
return super().save_pretrained(_A,_A,_A,_A,**_A )
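# Hedged usage sketch (added; upstream this class is RoFormerTokenizerFast, and
# the checkpoint name comes from the vocab map above):
#   tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
#   tokens = tokenizer.tokenize("今天天气非常好。")  # uses the jieba pre-tokenizer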
| 18 | 1 |
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class a__ ( A__ ):
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
return Dataset.from_dict(_A )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self._create_example_records()
SCREAMING_SNAKE_CASE_ : List[str] = Dataset.from_list(_A )
self.assertListEqual(dset.column_names,["col_1", "col_2"] )
for i, r in enumerate(_A ):
self.assertDictEqual(_A,example_records[i] )
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self._create_example_records()
SCREAMING_SNAKE_CASE_ : Any = Dataset.from_list(_A )
SCREAMING_SNAKE_CASE_ : Dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info,dset_from_dict.info )
def __UpperCamelCase ( self : Tuple ): # checks what happens with missing columns
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = [{"col_1": 1}, {"col_2": "x"}]
SCREAMING_SNAKE_CASE_ : Tuple = Dataset.from_list(_A )
self.assertDictEqual(dset[0],{"col_1": 1} )
self.assertDictEqual(dset[1],{"col_1": None} ) # NB: first record is used for columns
def __UpperCamelCase ( self : Any ): # checks if the type can be inferred from the second record
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = [{"col_1": []}, {"col_1": [1, 2]}]
SCREAMING_SNAKE_CASE_ : Optional[Any] = Dataset.from_list(_A )
self.assertEqual(dset.info.features["col_1"],Sequence(Value("int64" ) ) )
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = Dataset.from_list([] )
self.assertEqual(len(_A ),0 )
self.assertListEqual(dset.column_names,[] )
| 18 | import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class a__ ( A__ ):
def __init__( self : Tuple,_A : Optional[int],_A : Any=13,_A : List[str]=7,_A : int=True,_A : Dict=True,_A : Dict=False,_A : List[Any]=True,_A : Any=99,_A : Optional[int]=32,_A : Any=5,_A : List[Any]=4,_A : Dict=64,_A : Optional[Any]="gelu",_A : Tuple=0.1,_A : Any=0.1,_A : List[Any]=512,_A : Dict=16,_A : Optional[Any]=2,_A : Union[str, Any]=0.02,_A : List[str]=3,_A : Optional[Any]=4,_A : Union[str, Any]=None,_A : Tuple=2,_A : List[str]=2,_A : str=2,_A : Dict=2,_A : Optional[Any]=4,_A : Union[str, Any]=1,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = parent
SCREAMING_SNAKE_CASE_ : Optional[int] = batch_size
SCREAMING_SNAKE_CASE_ : Dict = seq_length
SCREAMING_SNAKE_CASE_ : Dict = is_training
SCREAMING_SNAKE_CASE_ : Optional[int] = use_input_mask
SCREAMING_SNAKE_CASE_ : int = use_token_type_ids
SCREAMING_SNAKE_CASE_ : Optional[int] = use_labels
SCREAMING_SNAKE_CASE_ : Tuple = vocab_size
SCREAMING_SNAKE_CASE_ : Any = hidden_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE_ : List[Any] = intermediate_size
SCREAMING_SNAKE_CASE_ : List[str] = hidden_act
SCREAMING_SNAKE_CASE_ : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : Optional[int] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : str = max_position_embeddings
SCREAMING_SNAKE_CASE_ : str = type_vocab_size
SCREAMING_SNAKE_CASE_ : List[str] = type_sequence_label_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE_ : Tuple = num_labels
SCREAMING_SNAKE_CASE_ : List[Any] = num_choices
SCREAMING_SNAKE_CASE_ : Dict = scope
SCREAMING_SNAKE_CASE_ : int = q_groups
SCREAMING_SNAKE_CASE_ : Tuple = k_groups
SCREAMING_SNAKE_CASE_ : List[Any] = v_groups
SCREAMING_SNAKE_CASE_ : Tuple = post_attention_groups
SCREAMING_SNAKE_CASE_ : int = intermediate_groups
SCREAMING_SNAKE_CASE_ : List[Any] = output_groups
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
SCREAMING_SNAKE_CASE_ : List[Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ : Optional[Any] = None
SCREAMING_SNAKE_CASE_ : Any = None
SCREAMING_SNAKE_CASE_ : str = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : str = ids_tensor([self.batch_size],self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length],self.num_labels )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([self.batch_size],self.num_choices )
SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase ( self : str ):
"""simple docstring"""
return SqueezeBertConfig(
embedding_size=self.hidden_size,vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,attention_probs_dropout_prob=self.hidden_dropout_prob,attention_dropout=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,initializer_range=self.initializer_range,q_groups=self.q_groups,k_groups=self.k_groups,v_groups=self.v_groups,post_attention_groups=self.post_attention_groups,intermediate_groups=self.intermediate_groups,output_groups=self.output_groups,)
def __UpperCamelCase ( self : Tuple,_A : Union[str, Any],_A : Union[str, Any],_A : int,_A : Optional[int],_A : Union[str, Any],_A : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = SqueezeBertModel(config=_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : Any = model(_A,_A )
SCREAMING_SNAKE_CASE_ : List[str] = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self : Dict,_A : Any,_A : Tuple,_A : str,_A : Any,_A : Union[str, Any],_A : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = SqueezeBertForMaskedLM(config=_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : List[str] = model(_A,attention_mask=_A,labels=_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self : Optional[int],_A : Union[str, Any],_A : Union[str, Any],_A : Any,_A : Tuple,_A : int,_A : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = SqueezeBertForQuestionAnswering(config=_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = model(
_A,attention_mask=_A,start_positions=_A,end_positions=_A )
self.parent.assertEqual(result.start_logits.shape,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape,(self.batch_size, self.seq_length) )
def __UpperCamelCase ( self : List[Any],_A : List[str],_A : Tuple,_A : List[Any],_A : List[str],_A : List[str],_A : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.num_labels
SCREAMING_SNAKE_CASE_ : List[str] = SqueezeBertForSequenceClassification(_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = model(_A,attention_mask=_A,labels=_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : str,_A : Optional[int],_A : str,_A : List[Any],_A : List[str],_A : str,_A : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.num_labels
SCREAMING_SNAKE_CASE_ : Optional[int] = SqueezeBertForTokenClassification(config=_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : Optional[int] = model(_A,attention_mask=_A,labels=_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase ( self : List[Any],_A : Tuple,_A : str,_A : Optional[Any],_A : int,_A : str,_A : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self.num_choices
SCREAMING_SNAKE_CASE_ : Union[str, Any] = SqueezeBertForMultipleChoice(config=_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = input_ids.unsqueeze(1 ).expand(-1,self.num_choices,-1 ).contiguous()
SCREAMING_SNAKE_CASE_ : str = input_mask.unsqueeze(1 ).expand(-1,self.num_choices,-1 ).contiguous()
SCREAMING_SNAKE_CASE_ : Optional[int] = model(
_A,attention_mask=_A,labels=_A,)
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_choices) )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.prepare_config_and_inputs()
((SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_)) : Dict = config_and_inputs
SCREAMING_SNAKE_CASE_ : Dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class a__ ( A__ , A__ , unittest.TestCase ):
A = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
A = (
{
'feature-extraction': SqueezeBertModel,
'fill-mask': SqueezeBertForMaskedLM,
'question-answering': SqueezeBertForQuestionAnswering,
'text-classification': SqueezeBertForSequenceClassification,
'token-classification': SqueezeBertForTokenClassification,
'zero-shot': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
A = False
A = True
A = False
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = SqueezeBertModelTester(self )
SCREAMING_SNAKE_CASE_ : List[str] = ConfigTester(self,config_class=_A,dim=37 )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*_A )
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*_A )
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*_A )
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*_A )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*_A )
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*_A )
@slow
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ : Tuple = SqueezeBertModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@require_sentencepiece
@require_tokenizers
@require_torch
class a__ ( unittest.TestCase ):
@slow
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli" )
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.tensor([[1, 2_9414, 232, 328, 740, 1140, 1_2695, 69, 13, 1588, 2]] )
SCREAMING_SNAKE_CASE_ : List[Any] = model(_A )[0]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.Size((1, 3) )
self.assertEqual(output.shape,_A )
SCREAMING_SNAKE_CASE_ : int = torch.tensor([[0.6401, -0.0349, -0.6041]] )
self.assertTrue(torch.allclose(_A,_A,atol=1E-4 ) )
| 18 | 1 |
def pancake_sort ( arr : list ):
    """Sort ``arr`` in ascending order by repeatedly flipping prefixes:
    each pass flips the largest unsorted element to the front, then flips
    it into its final position at the end of the unsorted prefix."""
    cur = len(arr )
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur] ) )
        # Reverse from 0 to mi so the maximum moves to the front
        arr = arr[mi::-1] + arr[mi + 1 : len(arr )]
        # Reverse the first cur elements so the maximum lands at index cur - 1
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr )]
        cur -= 1
    return arr
if __name__ == "__main__":
__lowerCamelCase : Optional[int] = input('''Enter numbers separated by a comma:\n''').strip()
__lowerCamelCase : str = [int(item) for item in user_input.split(''',''')]
print(pancake_sort(unsorted))
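    # Non-interactive example (added for illustration; the demo above reads stdin):
    assert pancake_sort([3, 1, 2, 10, -5]) == [-5, 1, 2, 3, 10]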
| 18 | import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _snake_case ( lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
assert isinstance(lowerCAmelCase , lowerCAmelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def _snake_case ( lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE_ : List[str] = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase , keep_in_memory=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def _snake_case ( lowerCAmelCase : Tuple , lowerCAmelCase : Dict , lowerCAmelCase : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Optional[Any] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : Any = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE_ : List[str] = (
Features({feature: Value(lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE_ : Optional[Any] = TextDatasetReader(lowerCAmelCase , features=lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def _snake_case ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : List[str] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : List[str] = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase , split=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def _snake_case ( lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : Optional[int] ):
"""simple docstring"""
if issubclass(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = text_path
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [text_path]
SCREAMING_SNAKE_CASE_ : int = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Optional[int] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : List[str] = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
def _snake_case ( lowerCAmelCase : Dict , lowerCAmelCase : Dict , lowerCAmelCase : List[str]=("train",) ):
"""simple docstring"""
assert isinstance(lowerCAmelCase , lowerCAmelCase )
for split in splits:
SCREAMING_SNAKE_CASE_ : int = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def _snake_case ( lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE_ : List[Any] = TextDatasetReader({"train": text_path} , cache_dir=lowerCAmelCase , keep_in_memory=lowerCAmelCase ).read()
_check_text_datasetdict(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def _snake_case ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = tmp_path / "cache"
    # the text builder always reads lines into a single "text" column; the
    # `features` parametrization above only casts that column's dtype
SCREAMING_SNAKE_CASE_ : Tuple = {"text": "string"}
SCREAMING_SNAKE_CASE_ : Any = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE_ : Dict = (
Features({feature: Value(lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE_ : str = TextDatasetReader({"train": text_path} , features=lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_datasetdict(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def _snake_case ( lowerCAmelCase : Optional[Any] , lowerCAmelCase : Any , lowerCAmelCase : Dict ):
"""simple docstring"""
if split:
SCREAMING_SNAKE_CASE_ : Optional[int] = {split: text_path}
else:
SCREAMING_SNAKE_CASE_ : List[Any] = "train"
SCREAMING_SNAKE_CASE_ : Tuple = {"train": text_path, "test": text_path}
SCREAMING_SNAKE_CASE_ : Any = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : List[str] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : str = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_datasetdict(lowerCAmelCase , lowerCAmelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
| 18 | 1 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
__lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
__lowerCamelCase : Union[str, Any] = {
'''EleutherAI/gpt-neo-1.3B''': '''https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json''',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class a__ ( A__ ):
A = 'gpt_neo'
A = ['past_key_values']
A = {'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
def __init__( self : Tuple,_A : str=5_0257,_A : Dict=2048,_A : Any=2048,_A : Optional[int]=24,_A : List[str]=[[["global", "local"], 12]],_A : List[str]=16,_A : Union[str, Any]=None,_A : Optional[Any]=256,_A : Any="gelu_new",_A : int=0.0,_A : Optional[Any]=0.0,_A : Any=0.0,_A : str=0.1,_A : Any=1E-5,_A : int=0.02,_A : List[Any]=True,_A : Union[str, Any]=5_0256,_A : List[str]=5_0256,**_A : Tuple,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = vocab_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE_ : Dict = hidden_size
SCREAMING_SNAKE_CASE_ : str = num_layers
SCREAMING_SNAKE_CASE_ : str = num_heads
SCREAMING_SNAKE_CASE_ : Dict = intermediate_size
SCREAMING_SNAKE_CASE_ : str = window_size
SCREAMING_SNAKE_CASE_ : Tuple = activation_function
SCREAMING_SNAKE_CASE_ : Union[str, Any] = resid_dropout
SCREAMING_SNAKE_CASE_ : int = embed_dropout
SCREAMING_SNAKE_CASE_ : Optional[Any] = attention_dropout
SCREAMING_SNAKE_CASE_ : Tuple = classifier_dropout
SCREAMING_SNAKE_CASE_ : Tuple = layer_norm_epsilon
SCREAMING_SNAKE_CASE_ : List[Any] = initializer_range
SCREAMING_SNAKE_CASE_ : Optional[Any] = use_cache
SCREAMING_SNAKE_CASE_ : Tuple = bos_token_id
SCREAMING_SNAKE_CASE_ : Tuple = eos_token_id
SCREAMING_SNAKE_CASE_ : Optional[Any] = attention_types
SCREAMING_SNAKE_CASE_ : str = self.expand_attention_types_params(_A )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
"Configuration for convolutional module is incorrect. "
"It is required that `len(config.attention_layers)` == `config.num_layers` "
F'but is `len(config.attention_layers) = {len(self.attention_layers )}`, '
F'`config.num_layers = {self.num_layers}`. '
"`config.attention_layers` is prepared using `config.attention_types`. "
"Please verify the value of `config.attention_types` argument." )
super().__init__(bos_token_id=_A,eos_token_id=_A,**_A )
@staticmethod
def __UpperCamelCase ( _A : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
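    # Worked example (added for illustration): the default
    # attention_types=[[["global", "local"], 12]] expands to the 24-entry
    # pattern ["global", "local", "global", "local", ...], matching num_layers=24.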
def _snake_case ( lowerCAmelCase : Tuple , lowerCAmelCase : Any , lowerCAmelCase : Tuple , lowerCAmelCase : List[str] ):
"""simple docstring"""
import torch
SCREAMING_SNAKE_CASE_ : str = input.size()
SCREAMING_SNAKE_CASE_ : Any = len(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Any = shape[dimension]
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.arange(0 , lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[Any] = torch.div(sizedim - size , lowerCAmelCase , rounding_mode="floor" ) + 1
SCREAMING_SNAKE_CASE_ : Tuple = torch.arange(lowerCAmelCase ) + low_indices[:min_length][:, None]
SCREAMING_SNAKE_CASE_ : List[str] = [slice(lowerCAmelCase )] * rank
SCREAMING_SNAKE_CASE_ : List[Any] = indices
SCREAMING_SNAKE_CASE_ : Optional[Any] = input[s]
SCREAMING_SNAKE_CASE_ : Any = list(range(0 , rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(lowerCAmelCase )
def _snake_case ( lowerCAmelCase : List[str] , lowerCAmelCase : Dict ):
"""simple docstring"""
import torch
SCREAMING_SNAKE_CASE_ : str = torch.arange(1 , lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[str] = torch.remainder(lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = remainders == 0
SCREAMING_SNAKE_CASE_ : str = candidates[divisor_indices]
SCREAMING_SNAKE_CASE_ : str = torch.max(lowerCAmelCase )
return largest_divisor, torch.div(lowerCAmelCase , lowerCAmelCase , rounding_mode="floor" )
class a__ ( A__ ):
@property
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
if self.use_past:
self.fill_with_past_key_values_(_A,direction="inputs" )
SCREAMING_SNAKE_CASE_ : int = {0: "batch", 1: "past_sequence + sequence"}
else:
SCREAMING_SNAKE_CASE_ : Dict = {0: "batch", 1: "sequence"}
return common_inputs
@property
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
return self._config.num_heads
def __UpperCamelCase ( self : Optional[int],_A : PreTrainedTokenizer,_A : int = -1,_A : int = -1,_A : bool = False,_A : Optional[TensorType] = None,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = super(_A,self ).generate_dummy_inputs(
_A,batch_size=_A,seq_length=_A,is_pair=_A,framework=_A )
        # We need to order the inputs in the way they appear in the forward() signature
SCREAMING_SNAKE_CASE_ : Any = OrderedDict({"input_ids": common_inputs["input_ids"]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
SCREAMING_SNAKE_CASE_ : List[str] = seqlen + 2
SCREAMING_SNAKE_CASE_ : Any = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
SCREAMING_SNAKE_CASE_ : Optional[int] = [
(torch.zeros(_A ), torch.zeros(_A )) for _ in range(self.num_layers )
]
SCREAMING_SNAKE_CASE_ : Optional[int] = common_inputs["attention_mask"]
if self.use_past:
SCREAMING_SNAKE_CASE_ : Tuple = ordered_inputs["attention_mask"].dtype
SCREAMING_SNAKE_CASE_ : str = torch.cat(
[ordered_inputs["attention_mask"], torch.ones(_A,_A,dtype=_A )],dim=1 )
return ordered_inputs
@property
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
return 13
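# Hedged export sketch (added; assumes the ``transformers.onnx`` CLI of the
# installed version still accepts these flags):
#   python -m transformers.onnx --model=EleutherAI/gpt-neo-1.3B --feature=causal-lm onnx/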
| 18 | import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def _snake_case ( lowerCAmelCase : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = [tensor.shape for tensor in tensor_list]
return all(shape == shapes[0] for shape in shapes[1:] )
class a__ ( A__ , A__ , A__ , unittest.TestCase ):
A = StableDiffusionLatentUpscalePipeline
A = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'height',
'width',
'cross_attention_kwargs',
'negative_prompt_embeds',
'prompt_embeds',
}
A = PipelineTesterMixin.required_optional_params - {'num_images_per_prompt'}
A = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
A = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
A = frozenset([] )
A = True
@property
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = 1
SCREAMING_SNAKE_CASE_ : Optional[int] = 4
SCREAMING_SNAKE_CASE_ : Optional[int] = (16, 16)
SCREAMING_SNAKE_CASE_ : Dict = floats_tensor((batch_size, num_channels) + sizes,rng=random.Random(0 ) ).to(_A )
return image
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : str = UNetaDConditionModel(
act_fn="gelu",attention_head_dim=8,norm_num_groups=_A,block_out_channels=[32, 32, 64, 64],time_cond_proj_dim=160,conv_in_kernel=1,conv_out_kernel=1,cross_attention_dim=32,down_block_types=(
"KDownBlock2D",
"KCrossAttnDownBlock2D",
"KCrossAttnDownBlock2D",
"KCrossAttnDownBlock2D",
),in_channels=8,mid_block_type=_A,only_cross_attention=_A,out_channels=5,resnet_time_scale_shift="scale_shift",time_embedding_type="fourier",timestep_post_act="gelu",up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"),)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 32, 64, 64],in_channels=3,out_channels=3,down_block_types=[
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
],up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],latent_channels=4,)
SCREAMING_SNAKE_CASE_ : int = EulerDiscreteScheduler(prediction_type="sample" )
SCREAMING_SNAKE_CASE_ : List[Any] = CLIPTextConfig(
bos_token_id=0,eos_token_id=2,hidden_size=32,intermediate_size=37,layer_norm_eps=1E-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=1000,hidden_act="quick_gelu",projection_dim=512,)
SCREAMING_SNAKE_CASE_ : Tuple = CLIPTextModel(_A )
SCREAMING_SNAKE_CASE_ : int = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {
"unet": model.eval(),
"vae": vae.eval(),
"scheduler": scheduler,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def __UpperCamelCase ( self : List[Any],_A : int,_A : Tuple=0 ):
"""simple docstring"""
if str(_A ).startswith("mps" ):
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.manual_seed(_A )
else:
SCREAMING_SNAKE_CASE_ : Dict = torch.Generator(device=_A ).manual_seed(_A )
SCREAMING_SNAKE_CASE_ : Tuple = {
"prompt": "A painting of a squirrel eating a burger",
"image": self.dummy_image.cpu(),
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = "cpu"
SCREAMING_SNAKE_CASE_ : List[str] = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : List[str] = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
SCREAMING_SNAKE_CASE_ : Tuple = self.get_dummy_inputs(_A )
SCREAMING_SNAKE_CASE_ : Dict = pipe(**_A ).images
SCREAMING_SNAKE_CASE_ : Optional[int] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape,(1, 256, 256, 3) )
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.array(
[0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055] )
SCREAMING_SNAKE_CASE_ : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_A,1E-3 )
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
super().test_attention_slicing_forward_pass(expected_max_diff=7E-3 )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
super().test_cpu_offload_forward_pass(expected_max_diff=3E-3 )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def __UpperCamelCase ( self : int ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=7E-3 )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3E-3 )
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
super().test_save_load_local(expected_max_difference=3E-3 )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
super().test_save_load_optional_components(expected_max_difference=3E-3 )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = [
"DDIMScheduler",
"DDPMScheduler",
"PNDMScheduler",
"HeunDiscreteScheduler",
"EulerAncestralDiscreteScheduler",
"KDPM2DiscreteScheduler",
"KDPM2AncestralDiscreteScheduler",
"DPMSolverSDEScheduler",
]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : Any = self.pipeline_class(**_A )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
SCREAMING_SNAKE_CASE_ : Any = self.get_dummy_inputs(_A )
SCREAMING_SNAKE_CASE_ : Optional[Any] = 2
SCREAMING_SNAKE_CASE_ : Optional[int] = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
                # schedulers in the skip list do not support the sigma schedule
                # this pipeline relies on, so they are skipped
continue
SCREAMING_SNAKE_CASE_ : Tuple = getattr(_A,scheduler_enum.name )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = scheduler_cls.from_config(pipe.scheduler.config )
SCREAMING_SNAKE_CASE_ : Optional[int] = pipe(**_A )[0]
outputs.append(_A )
assert check_same_shape(_A )
@require_torch_gpu
@slow
class a__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = torch.manual_seed(33 )
SCREAMING_SNAKE_CASE_ : Optional[int] = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4",torch_dtype=torch.floataa )
pipe.to("cuda" )
SCREAMING_SNAKE_CASE_ : List[Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
"stabilityai/sd-x2-latent-upscaler",torch_dtype=torch.floataa )
upscaler.to("cuda" )
SCREAMING_SNAKE_CASE_ : Tuple = "a photo of an astronaut high resolution, unreal engine, ultra realistic"
SCREAMING_SNAKE_CASE_ : str = pipe(_A,generator=_A,output_type="latent" ).images
SCREAMING_SNAKE_CASE_ : Optional[Any] = upscaler(
prompt=_A,image=_A,num_inference_steps=20,guidance_scale=0,generator=_A,output_type="np",).images[0]
SCREAMING_SNAKE_CASE_ : str = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy" )
assert np.abs((expected_image - image).mean() ) < 5E-2
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = torch.manual_seed(33 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
"stabilityai/sd-x2-latent-upscaler",torch_dtype=torch.floataa )
upscaler.to("cuda" )
SCREAMING_SNAKE_CASE_ : Any = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"
SCREAMING_SNAKE_CASE_ : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png" )
SCREAMING_SNAKE_CASE_ : str = upscaler(
prompt=_A,image=_A,num_inference_steps=20,guidance_scale=0,generator=_A,output_type="np",).images[0]
SCREAMING_SNAKE_CASE_ : Any = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy" )
assert np.abs((expected_image - image).max() ) < 5E-2
| 18 | 1 |
from __future__ import annotations
def min_path_sum ( matrix : list[list[int]] ):
    """Return the minimum path sum from the top-left to the bottom-right
    cell of ``matrix``, moving only right or down (costs accumulate in place)."""
    # preprocessing the first row
    for i in range(1 , len(matrix[0] ) ):
        matrix[0][i] += matrix[0][i - 1]
    # preprocessing the first column
    for i in range(1 , len(matrix ) ):
        matrix[i][0] += matrix[i - 1][0]
    # updating the path cost for current position
    for i in range(1 , len(matrix ) ):
        for j in range(1 , len(matrix[0] ) ):
            matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] )
    return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
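    # Worked example (added for illustration): for the classic 3x3 grid the
    # cheapest route is 1 -> 3 -> 1 -> 1 -> 1, which sums to 7.
    assert min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7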
| 18 | from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
__lowerCamelCase : Optional[int] = logging.get_logger(__name__)
__lowerCamelCase : Tuple = {
'''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class a__ ( A__ ):
A = 'perceiver'
def __init__( self : List[Any],_A : Tuple=256,_A : str=1280,_A : List[Any]=768,_A : Union[str, Any]=1,_A : Union[str, Any]=26,_A : List[str]=8,_A : List[Any]=8,_A : List[Any]=None,_A : List[Any]=None,_A : Union[str, Any]="kv",_A : Any=1,_A : int=1,_A : Dict="gelu",_A : Any=0.1,_A : int=0.02,_A : int=1E-12,_A : Any=True,_A : Optional[Any]=262,_A : List[Any]=2048,_A : str=56,_A : Optional[int]=[368, 496],_A : Dict=16,_A : Tuple=1920,_A : List[Any]=16,_A : str=[1, 16, 224, 224],**_A : Optional[Any],):
"""simple docstring"""
super().__init__(**_A )
SCREAMING_SNAKE_CASE_ : Dict = num_latents
SCREAMING_SNAKE_CASE_ : List[Any] = d_latents
SCREAMING_SNAKE_CASE_ : Union[str, Any] = d_model
SCREAMING_SNAKE_CASE_ : Optional[int] = num_blocks
SCREAMING_SNAKE_CASE_ : List[Any] = num_self_attends_per_block
SCREAMING_SNAKE_CASE_ : Tuple = num_self_attention_heads
SCREAMING_SNAKE_CASE_ : List[str] = num_cross_attention_heads
SCREAMING_SNAKE_CASE_ : List[Any] = qk_channels
SCREAMING_SNAKE_CASE_ : Any = v_channels
SCREAMING_SNAKE_CASE_ : Any = cross_attention_shape_for_attention
SCREAMING_SNAKE_CASE_ : List[str] = self_attention_widening_factor
SCREAMING_SNAKE_CASE_ : Any = cross_attention_widening_factor
SCREAMING_SNAKE_CASE_ : List[Any] = hidden_act
SCREAMING_SNAKE_CASE_ : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Any = initializer_range
SCREAMING_SNAKE_CASE_ : List[Any] = layer_norm_eps
SCREAMING_SNAKE_CASE_ : Tuple = use_query_residual
# masked language modeling attributes
SCREAMING_SNAKE_CASE_ : List[str] = vocab_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = max_position_embeddings
# image classification attributes
SCREAMING_SNAKE_CASE_ : Dict = image_size
# flow attributes
SCREAMING_SNAKE_CASE_ : List[Any] = train_size
# multimodal autoencoding attributes
SCREAMING_SNAKE_CASE_ : str = num_frames
SCREAMING_SNAKE_CASE_ : Any = audio_samples_per_frame
SCREAMING_SNAKE_CASE_ : Tuple = samples_per_patch
SCREAMING_SNAKE_CASE_ : Optional[Any] = output_shape
class a__ ( A__ ):
@property
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE_ : List[str] = {0: "batch", 1: "choice", 2: "sequence"}
else:
SCREAMING_SNAKE_CASE_ : str = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("inputs", dynamic_axis),
("attention_mask", dynamic_axis),
] )
@property
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
return 1E-4
def __UpperCamelCase ( self : List[str],_A : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],_A : int = -1,_A : int = -1,_A : int = -1,_A : bool = False,_A : Optional[TensorType] = None,_A : int = 3,_A : int = 40,_A : int = 40,):
"""simple docstring"""
if isinstance(_A,_A ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE_ : Tuple = compute_effective_axis_dimension(
_A,fixed_dimension=OnnxConfig.default_fixed_batch,num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE_ : Tuple = preprocessor.num_special_tokens_to_add(_A )
SCREAMING_SNAKE_CASE_ : Any = compute_effective_axis_dimension(
_A,fixed_dimension=OnnxConfig.default_fixed_sequence,num_token_to_add=_A )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE_ : Optional[Any] = [" ".join(["a"] ) * seq_length] * batch_size
SCREAMING_SNAKE_CASE_ : str = dict(preprocessor(_A,return_tensors=_A ) )
SCREAMING_SNAKE_CASE_ : List[str] = inputs.pop("input_ids" )
return inputs
elif isinstance(_A,_A ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE_ : Any = compute_effective_axis_dimension(_A,fixed_dimension=OnnxConfig.default_fixed_batch )
SCREAMING_SNAKE_CASE_ : Optional[int] = self._generate_dummy_images(_A,_A,_A,_A )
SCREAMING_SNAKE_CASE_ : Any = dict(preprocessor(images=_A,return_tensors=_A ) )
SCREAMING_SNAKE_CASE_ : Any = inputs.pop("pixel_values" )
return inputs
else:
raise ValueError(
"Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor." )
| 18 | 1 |
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
__lowerCamelCase : Any = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def _snake_case ( lowerCAmelCase : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = list(s_dict.keys() )
for key in keys:
SCREAMING_SNAKE_CASE_ : int = R".*/layers_(\d+)"
SCREAMING_SNAKE_CASE_ : List[Any] = key
if re.match(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Any = re.sub(R"layers_(\d+)" , R"block/\1/layer" , lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[Any] = R"(encoder|decoder)\/"
if re.match(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : str = re.match(lowerCAmelCase , lowerCAmelCase ).groups()
if groups[0] == "encoder":
SCREAMING_SNAKE_CASE_ : Any = re.sub(R"/mlp/" , R"/1/mlp/" , lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[Any] = re.sub(R"/pre_mlp_layer_norm/" , R"/1/layer_norm/" , lowerCAmelCase )
elif groups[0] == "decoder":
SCREAMING_SNAKE_CASE_ : List[str] = re.sub(R"/mlp/" , R"/2/mlp/" , lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Dict = re.sub(R"/pre_mlp_layer_norm/" , R"/2/layer_norm/" , lowerCAmelCase )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
SCREAMING_SNAKE_CASE_ : List[Any] = new_key.replace(lowerCAmelCase , lowerCAmelCase )
print(f'{key} -> {new_key}' )
SCREAMING_SNAKE_CASE_ : List[Any] = s_dict.pop(lowerCAmelCase )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
SCREAMING_SNAKE_CASE_ : str = s_dict[
"encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
SCREAMING_SNAKE_CASE_ : Optional[int] = s_dict[
"decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = s_dict[key].shape[0]
SCREAMING_SNAKE_CASE_ : List[Any] = s_dict[key]
for idx in range(lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Tuple = expert_weihts[idx]
                print(f'{key} -> {key.replace("expert/" , "experts/expert_" + str(idx ) + "/" )}' )
s_dict.pop(lowerCAmelCase )
return s_dict
__lowerCamelCase : List[Any] = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def _snake_case ( lowerCAmelCase : Dict , lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
import regex as re
with open(lowerCAmelCase , "r" ) as f:
SCREAMING_SNAKE_CASE_ : Optional[Any] = f.read()
SCREAMING_SNAKE_CASE_ : List[str] = re.findall(R"(.*) = ([0-9.]*)" , lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Any = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
SCREAMING_SNAKE_CASE_ : int = float(lowerCAmelCase ) if "." in value else int(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = re.findall(R"(.*activations) = \(\'(.*)\',\)" , lowerCAmelCase )[0]
SCREAMING_SNAKE_CASE_ : List[str] = str(activation[1] )
SCREAMING_SNAKE_CASE_ : str = num_experts
SCREAMING_SNAKE_CASE_ : Tuple = SwitchTransformersConfig(**lowerCAmelCase )
return config
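# For reference (values are hypothetical): the regexes above expect gin lines
# of the form "NAME = value", each mapped through GIN_TO_CONFIG_MAPPING, e.g.
#   NUM_ENCODER_LAYERS = 12                 -> num_layers=12
#   MLP_DIM = 3072                          -> d_ff=3072
#   dense.MlpBlock.activations = ('gelu',)  -> feed_forward_proj="gelu"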
def _snake_case ( lowerCAmelCase : str , lowerCAmelCase : str , lowerCAmelCase : str=None , lowerCAmelCase : Optional[Any]="./" , lowerCAmelCase : Dict=8 ):
"""simple docstring"""
print(f'Loading flax weights from : {flax_checkpoint_path}' )
SCREAMING_SNAKE_CASE_ : int = checkpoints.load_t5x_checkpoint(lowerCAmelCase )
if gin_file is not None:
SCREAMING_SNAKE_CASE_ : int = convert_gin_to_config(lowerCAmelCase , lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE_ : Dict = SwitchTransformersConfig.from_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : str = SwitchTransformersForConditionalGeneration(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = flax_params["target"]
SCREAMING_SNAKE_CASE_ : List[str] = flatten_dict(lowerCAmelCase , sep="/" )
SCREAMING_SNAKE_CASE_ : List[str] = rename_keys(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = unflatten_dict(lowerCAmelCase , sep="/" )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(lowerCAmelCase , lowerCAmelCase )
print(f'Save PyTorch model to {pytorch_dump_path}' )
pt_model.save_pretrained(lowerCAmelCase )
if __name__ == "__main__":
__lowerCamelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path to the T5X / flax checkpoint of the pre-trained SwitchTransformers model to convert.''',
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
help='''Path to the gin config file. If not provided, a `config_name` has to be passed.''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
__lowerCamelCase : Any = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 18 | from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class a__ ( yaml.SafeLoader ):
def __UpperCamelCase ( self : str,_A : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = [self.constructed_objects[key_node] for key_node, _ in node.value]
SCREAMING_SNAKE_CASE_ : List[str] = [tuple(_A ) if isinstance(_A,_A ) else key for key in keys]
SCREAMING_SNAKE_CASE_ : Optional[int] = Counter(_A )
SCREAMING_SNAKE_CASE_ : Tuple = [key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(F'Got duplicate yaml keys: {duplicate_keys}' )
def __UpperCamelCase ( self : Tuple,_A : Dict,_A : List[Any]=False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = super().construct_mapping(_A,deep=_A )
self._check_no_duplicates_on_constructed_node(_A )
return mapping
def _snake_case ( lowerCAmelCase : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = list(readme_content.splitlines() )
if full_content and full_content[0] == "---" and "---" in full_content[1:]:
SCREAMING_SNAKE_CASE_ : List[Any] = full_content[1:].index("---" ) + 1
SCREAMING_SNAKE_CASE_ : int = "\n".join(full_content[1:sep_idx] )
return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
return None, "\n".join(lowerCAmelCase )
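# Illustrative example (assumed input): a README beginning with
#   ---
#   language: en
#   ---
#   # My dataset
# splits into the YAML block "language: en" and the body "# My dataset";
# a README without a leading "---" fence returns (None, full_text).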
class a__ ( A__ ):
# class attributes
A = {'train_eval_index'} # train-eval-index in the YAML metadata
@classmethod
def __UpperCamelCase ( cls : Any,_A : Path ):
"""simple docstring"""
with open(_A,encoding="utf-8" ) as readme_file:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = _split_yaml_from_readme(readme_file.read() )
if yaml_string is not None:
return cls.from_yaml_string(_A )
else:
return cls()
def __UpperCamelCase ( self : Dict,_A : Path ):
"""simple docstring"""
if path.exists():
with open(_A,encoding="utf-8" ) as readme_file:
SCREAMING_SNAKE_CASE_ : int = readme_file.read()
else:
SCREAMING_SNAKE_CASE_ : Any = None
SCREAMING_SNAKE_CASE_ : int = self._to_readme(_A )
with open(_A,"w",encoding="utf-8" ) as readme_file:
readme_file.write(_A )
def __UpperCamelCase ( self : Optional[int],_A : Optional[str] = None ):
"""simple docstring"""
if readme_content is not None:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = _split_yaml_from_readme(_A )
SCREAMING_SNAKE_CASE_ : Tuple = "---\n" + self.to_yaml_string() + "---\n" + content
else:
SCREAMING_SNAKE_CASE_ : Dict = "---\n" + self.to_yaml_string() + "---\n"
return full_content
@classmethod
def __UpperCamelCase ( cls : Dict,_A : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = yaml.load(_A,Loader=_NoDuplicateSafeLoader ) or {}
# Convert the YAML keys to DatasetMetadata fields
SCREAMING_SNAKE_CASE_ : Any = {
(key.replace("-","_" ) if key.replace("-","_" ) in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
return cls(**_A )
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
return yaml.safe_dump(
{
(key.replace("_","-" ) if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
},sort_keys=_A,allow_unicode=_A,encoding="utf-8",).decode("utf-8" )
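# Hedged usage sketch (method names follow the upstream DatasetMetadata API;
# the path is a placeholder):
#   metadata = DatasetMetadata.from_readme(Path("README.md"))
#   metadata.to_readme(Path("README.md"))  # rewrites the YAML front matter in place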
__lowerCamelCase : List[Any] = {
'''image-classification''': [],
'''translation''': [],
'''image-segmentation''': [],
'''fill-mask''': [],
'''automatic-speech-recognition''': [],
'''token-classification''': [],
'''sentence-similarity''': [],
'''audio-classification''': [],
'''question-answering''': [],
'''summarization''': [],
'''zero-shot-classification''': [],
'''table-to-text''': [],
'''feature-extraction''': [],
'''other''': [],
'''multiple-choice''': [],
'''text-classification''': [],
'''text-to-image''': [],
'''text2text-generation''': [],
'''zero-shot-image-classification''': [],
'''tabular-classification''': [],
'''tabular-regression''': [],
'''image-to-image''': [],
'''tabular-to-text''': [],
'''unconditional-image-generation''': [],
'''text-retrieval''': [],
'''text-to-speech''': [],
'''object-detection''': [],
'''audio-to-audio''': [],
'''text-generation''': [],
'''conversational''': [],
'''table-question-answering''': [],
'''visual-question-answering''': [],
'''image-to-text''': [],
'''reinforcement-learning''': [],
'''voice-activity-detection''': [],
'''time-series-forecasting''': [],
'''document-question-answering''': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
__lowerCamelCase : List[Any] = ArgumentParser(usage='''Validate the yaml metadata block of a README.md file.''')
ap.add_argument('''readme_filepath''')
__lowerCamelCase : Dict = ap.parse_args()
__lowerCamelCase : List[Any] = Path(args.readme_filepath)
__lowerCamelCase : Optional[int] = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 18 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCamelCase : List[Any] = {
'''configuration_xlm_roberta''': [
'''XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaConfig''',
'''XLMRobertaOnnxConfig''',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Dict = ['''XLMRobertaTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Union[str, Any] = ['''XLMRobertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : List[str] = [
'''XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaForCausalLM''',
'''XLMRobertaForMaskedLM''',
'''XLMRobertaForMultipleChoice''',
'''XLMRobertaForQuestionAnswering''',
'''XLMRobertaForSequenceClassification''',
'''XLMRobertaForTokenClassification''',
'''XLMRobertaModel''',
'''XLMRobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Any = [
'''TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMRobertaForCausalLM''',
'''TFXLMRobertaForMaskedLM''',
'''TFXLMRobertaForMultipleChoice''',
'''TFXLMRobertaForQuestionAnswering''',
'''TFXLMRobertaForSequenceClassification''',
'''TFXLMRobertaForTokenClassification''',
'''TFXLMRobertaModel''',
'''TFXLMRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : str = [
'''FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxXLMRobertaForMaskedLM''',
'''FlaxXLMRobertaForCausalLM''',
'''FlaxXLMRobertaForMultipleChoice''',
'''FlaxXLMRobertaForQuestionAnswering''',
'''FlaxXLMRobertaForSequenceClassification''',
'''FlaxXLMRobertaForTokenClassification''',
'''FlaxXLMRobertaModel''',
'''FlaxXLMRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
__lowerCamelCase : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
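# Note (general _LazyModule behavior, not specific to this file): the heavy
# modeling submodules above are only imported on first attribute access, e.g.
#   from transformers import XLMRobertaModel  # submodule resolved lazily at lookup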
| 18 | from __future__ import annotations
from math import pi, sqrt
def _snake_case ( lowerCAmelCase : float , lowerCAmelCase : float ):
"""simple docstring"""
if inductance <= 0:
raise ValueError("Inductance cannot be 0 or negative" )
elif capacitance <= 0:
raise ValueError("Capacitance cannot be 0 or negative" )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
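# Worked check (illustrative values): for L = 10 mH and C = 100 nF,
# f = 1 / (2*pi*sqrt(L*C)) = 1 / (2*pi*sqrt(1e-9)) ~= 5032.9 Hz.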
| 18 | 1 |
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class a__ ( A__ , unittest.TestCase ):
A = KandinskyVaaPriorPipeline
A = ['prompt']
A = ['prompt', 'negative_prompt']
A = [
'num_images_per_prompt',
'generator',
'num_inference_steps',
'latents',
'negative_prompt',
'guidance_scale',
'output_type',
'return_dict',
]
A = False
@property
def __UpperCamelCase ( self : int ):
"""simple docstring"""
return 32
@property
def __UpperCamelCase ( self : int ):
"""simple docstring"""
return 32
@property
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
return self.time_input_dim
@property
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
return 100
@property
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
return tokenizer
@property
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : str = CLIPTextConfig(
bos_token_id=0,eos_token_id=2,hidden_size=self.text_embedder_hidden_size,projection_dim=self.text_embedder_hidden_size,intermediate_size=37,layer_norm_eps=1E-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=1000,)
return CLIPTextModelWithProjection(_A )
@property
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Tuple = {
"num_attention_heads": 2,
"attention_head_dim": 12,
"embedding_dim": self.text_embedder_hidden_size,
"num_layers": 1,
}
SCREAMING_SNAKE_CASE_ : List[Any] = PriorTransformer(**_A )
# clip_std and clip_mean are initialized to 0, so PriorTransformer.post_process_latents would always return 0 - set clip_std to 1 so it won't
SCREAMING_SNAKE_CASE_ : Dict = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Tuple = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size,image_size=224,projection_dim=self.text_embedder_hidden_size,intermediate_size=37,num_attention_heads=4,num_channels=3,num_hidden_layers=5,patch_size=14,)
SCREAMING_SNAKE_CASE_ : Optional[Any] = CLIPVisionModelWithProjection(_A )
return model
@property
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = CLIPImageProcessor(
crop_size=224,do_center_crop=_A,do_normalize=_A,do_resize=_A,image_mean=[0.48145466, 0.4578275, 0.40821073],image_std=[0.26862954, 0.26130258, 0.27577711],resample=3,size=224,)
return image_processor
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = self.dummy_prior
SCREAMING_SNAKE_CASE_ : List[str] = self.dummy_image_encoder
SCREAMING_SNAKE_CASE_ : Any = self.dummy_text_encoder
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.dummy_tokenizer
SCREAMING_SNAKE_CASE_ : Optional[int] = self.dummy_image_processor
SCREAMING_SNAKE_CASE_ : Any = UnCLIPScheduler(
variance_type="fixed_small_log",prediction_type="sample",num_train_timesteps=1000,clip_sample=_A,clip_sample_range=10.0,)
SCREAMING_SNAKE_CASE_ : str = {
"prior": prior,
"image_encoder": image_encoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"scheduler": scheduler,
"image_processor": image_processor,
}
return components
def __UpperCamelCase ( self : Tuple,_A : int,_A : Dict=0 ):
"""simple docstring"""
if str(_A ).startswith("mps" ):
SCREAMING_SNAKE_CASE_ : Tuple = torch.manual_seed(_A )
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.Generator(device=_A ).manual_seed(_A )
SCREAMING_SNAKE_CASE_ : str = {
"prompt": "horse",
"generator": generator,
"guidance_scale": 4.0,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = "cpu"
SCREAMING_SNAKE_CASE_ : List[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.pipeline_class(**_A )
SCREAMING_SNAKE_CASE_ : List[str] = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
SCREAMING_SNAKE_CASE_ : List[str] = pipe(**self.get_dummy_inputs(_A ) )
SCREAMING_SNAKE_CASE_ : int = output.image_embeds
SCREAMING_SNAKE_CASE_ : Any = pipe(
**self.get_dummy_inputs(_A ),return_dict=_A,)[0]
SCREAMING_SNAKE_CASE_ : Any = image[0, -10:]
SCREAMING_SNAKE_CASE_ : List[Any] = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
SCREAMING_SNAKE_CASE_ : Optional[int] = np.array(
[-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch_device == "cpu"
SCREAMING_SNAKE_CASE_ : str = True
SCREAMING_SNAKE_CASE_ : List[str] = False
self._test_inference_batch_single_identical(
test_max_difference=_A,relax_max_difference=_A,test_mean_pixel_difference=_A,)
@skip_mps
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = torch_device == "cpu"
SCREAMING_SNAKE_CASE_ : Dict = False
self._test_attention_slicing_forward_pass(
test_max_difference=_A,test_mean_pixel_difference=_A,)
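# The slice assertions above compare 10 reference values of the prior's image
# embedding; enable_full_determinism() at import time is what keeps those
# numbers stable across runs on the same backend.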
| 18 | def _snake_case ( lowerCAmelCase : list ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = len(lowerCAmelCase )
for i in range(1 , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : int = collection[i]
SCREAMING_SNAKE_CASE_ : Any = 0
SCREAMING_SNAKE_CASE_ : Tuple = i - 1
while low <= high:
SCREAMING_SNAKE_CASE_ : int = (low + high) // 2
if val < collection[mid]:
SCREAMING_SNAKE_CASE_ : Optional[Any] = mid - 1
else:
SCREAMING_SNAKE_CASE_ : Tuple = mid + 1
for j in range(lowerCAmelCase , lowerCAmelCase , -1 ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = collection[j - 1]
SCREAMING_SNAKE_CASE_ : int = val
return collection
if __name__ == "__main__":
__lowerCamelCase : Dict = input('''Enter numbers separated by a comma:\n''').strip()
__lowerCamelCase : List[str] = [int(item) for item in user_input.split(''',''')]
print(binary_insertion_sort(unsorted))
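# Note: the binary search above locates each insertion point in O(log i)
# comparisons, but the shifting loop keeps the overall sort at O(n^2) moves,
# e.g. sorting [5, 2, 4, 1] yields [1, 2, 4, 5].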
| 18 | 1 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__lowerCamelCase : Dict = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-classification/requirements.txt''')
__lowerCamelCase : Dict = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
__lowerCamelCase : int = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def _snake_case ( lowerCAmelCase : str ):
"""simple docstring"""
with open(lowerCAmelCase , "rb" ) as f:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = Image.open(lowerCAmelCase )
return im.convert("RGB" )
@dataclass
class a__ :
A = field(
default=A__ , metadata={
'help': 'Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).'
} , )
A = field(
default=A__ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
A = field(default=A__ , metadata={'help': 'A folder containing the training data.'} )
A = field(default=A__ , metadata={'help': 'A folder containing the validation data.'} )
A = field(
default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} )
A = field(
default=A__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
A = field(
default=A__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
"You must specify either a dataset name from the hub or a train and/or validation directory." )
@dataclass
class a__ :
A = field(
default='google/vit-base-patch16-224-in21k' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
A = field(
default=A__ , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(A__ )} , )
A = field(
default=A__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
A = field(
default=A__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} )
A = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
A = field(default=A__ , metadata={'help': 'Name or path of preprocessor config.'} )
A = field(
default=A__ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
A = field(
default=A__ , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def _snake_case ( lowerCAmelCase : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = torch.stack([example["pixel_values"] for example in examples] )
SCREAMING_SNAKE_CASE_ : str = torch.tensor([example["labels"] for example in examples] )
return {"pixel_values": pixel_values, "labels": labels}
def _snake_case ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_image_classification" , lowerCAmelCase , lowerCAmelCase )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_ : Any = training_args.get_process_log_level()
logger.setLevel(lowerCAmelCase )
transformers.utils.logging.set_verbosity(lowerCAmelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '
+ f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bit training: {training_args.fp16}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
SCREAMING_SNAKE_CASE_ : str = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
SCREAMING_SNAKE_CASE_ : Dict = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
SCREAMING_SNAKE_CASE_ : str = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task="image-classification" , use_auth_token=True if model_args.use_auth_token else None , )
else:
SCREAMING_SNAKE_CASE_ : str = {}
if data_args.train_dir is not None:
SCREAMING_SNAKE_CASE_ : Optional[Any] = os.path.join(data_args.train_dir , "**" )
if data_args.validation_dir is not None:
SCREAMING_SNAKE_CASE_ : int = os.path.join(data_args.validation_dir , "**" )
SCREAMING_SNAKE_CASE_ : Dict = load_dataset(
"imagefolder" , data_files=lowerCAmelCase , cache_dir=model_args.cache_dir , task="image-classification" , )
# If we don't have a validation split, split off a percentage of train as validation.
SCREAMING_SNAKE_CASE_ : Optional[Any] = None if "validation" in dataset.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , lowerCAmelCase ) and data_args.train_val_split > 0.0:
SCREAMING_SNAKE_CASE_ : str = dataset["train"].train_test_split(data_args.train_val_split )
SCREAMING_SNAKE_CASE_ : str = split["train"]
SCREAMING_SNAKE_CASE_ : List[Any] = split["test"]
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
SCREAMING_SNAKE_CASE_ : Any = dataset["train"].features["labels"].names
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = {}, {}
for i, label in enumerate(lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Tuple = str(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Any = label
# Load the accuracy metric from the datasets package
SCREAMING_SNAKE_CASE_ : Tuple = evaluate.load("accuracy" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(lowerCAmelCase : Optional[Any] ):
return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids )
SCREAMING_SNAKE_CASE_ : str = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(lowerCAmelCase ) , label2id=lowerCAmelCase , id2label=lowerCAmelCase , finetuning_task="image-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
SCREAMING_SNAKE_CASE_ : List[str] = AutoModelForImageClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=lowerCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
SCREAMING_SNAKE_CASE_ : List[str] = AutoImageProcessor.from_pretrained(
model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
SCREAMING_SNAKE_CASE_ : Tuple = image_processor.size["shortest_edge"]
else:
SCREAMING_SNAKE_CASE_ : List[str] = (image_processor.size["height"], image_processor.size["width"])
SCREAMING_SNAKE_CASE_ : List[str] = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
SCREAMING_SNAKE_CASE_ : Dict = Compose(
[
RandomResizedCrop(lowerCAmelCase ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
SCREAMING_SNAKE_CASE_ : Optional[Any] = Compose(
[
Resize(lowerCAmelCase ),
CenterCrop(lowerCAmelCase ),
ToTensor(),
normalize,
] )
def train_transforms(lowerCAmelCase : Dict ):
SCREAMING_SNAKE_CASE_ : str = [
_train_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["image"]
]
return example_batch
def val_transforms(lowerCAmelCase : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ : int = [_val_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["image"]]
return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError("--do_train requires a train dataset" )
if data_args.max_train_samples is not None:
SCREAMING_SNAKE_CASE_ : List[str] = (
dataset["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
dataset["train"].set_transform(lowerCAmelCase )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError("--do_eval requires a validation dataset" )
if data_args.max_eval_samples is not None:
SCREAMING_SNAKE_CASE_ : List[Any] = (
dataset["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
dataset["validation"].set_transform(lowerCAmelCase )
# Initialize our trainer
SCREAMING_SNAKE_CASE_ : Dict = Trainer(
model=lowerCAmelCase , args=lowerCAmelCase , train_dataset=dataset["train"] if training_args.do_train else None , eval_dataset=dataset["validation"] if training_args.do_eval else None , compute_metrics=lowerCAmelCase , tokenizer=lowerCAmelCase , data_collator=lowerCAmelCase , )
# Training
if training_args.do_train:
SCREAMING_SNAKE_CASE_ : List[Any] = None
if training_args.resume_from_checkpoint is not None:
SCREAMING_SNAKE_CASE_ : Any = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
SCREAMING_SNAKE_CASE_ : List[Any] = last_checkpoint
SCREAMING_SNAKE_CASE_ : Any = trainer.train(resume_from_checkpoint=lowerCAmelCase )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
SCREAMING_SNAKE_CASE_ : int = trainer.evaluate()
trainer.log_metrics("eval" , lowerCAmelCase )
trainer.save_metrics("eval" , lowerCAmelCase )
# Write model card and (optionally) push to hub
SCREAMING_SNAKE_CASE_ : Optional[int] = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "image-classification",
"dataset": data_args.dataset_name,
"tags": ["image-classification", "vision"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCAmelCase )
else:
trainer.create_model_card(**lowerCAmelCase )
if __name__ == "__main__":
main()
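# Example invocation (dataset name and paths are placeholders):
# python run_image_classification.py \
#   --dataset_name beans --output_dir ./vit-beans \
#   --do_train --do_eval --num_train_epochs 3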
| 18 | from collections.abc import Sequence
from queue import Queue
class a__ :
def __init__( self : int,_A : List[Any],_A : Optional[Any],_A : Optional[int],_A : int=None,_A : List[str]=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = start
SCREAMING_SNAKE_CASE_ : List[str] = end
SCREAMING_SNAKE_CASE_ : Tuple = val
SCREAMING_SNAKE_CASE_ : List[str] = (start + end) // 2
SCREAMING_SNAKE_CASE_ : Optional[int] = left
SCREAMING_SNAKE_CASE_ : str = right
def __repr__( self : Tuple ):
"""simple docstring"""
return F'SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'
class a__ :
def __init__( self : Any,_A : Sequence,_A : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = collection
SCREAMING_SNAKE_CASE_ : Optional[int] = function
if self.collection:
SCREAMING_SNAKE_CASE_ : List[str] = self._build_tree(0,len(_A ) - 1 )
def __UpperCamelCase ( self : int,_A : Any,_A : List[Any] ):
"""simple docstring"""
self._update_tree(self.root,_A,_A )
def __UpperCamelCase ( self : str,_A : Any,_A : List[Any] ):
"""simple docstring"""
return self._query_range(self.root,_A,_A )
def __UpperCamelCase ( self : Any,_A : Optional[int],_A : int ):
"""simple docstring"""
if start == end:
return SegmentTreeNode(_A,_A,self.collection[start] )
SCREAMING_SNAKE_CASE_ : List[Any] = (start + end) // 2
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self._build_tree(_A,_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = self._build_tree(mid + 1,_A )
return SegmentTreeNode(_A,_A,self.fn(left.val,right.val ),_A,_A )
def __UpperCamelCase ( self : int,_A : int,_A : Tuple,_A : Dict ):
"""simple docstring"""
if node.start == i and node.end == i:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = val
return
if i <= node.mid:
self._update_tree(node.left,_A,_A )
else:
self._update_tree(node.right,_A,_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.fn(node.left.val,node.right.val )
def __UpperCamelCase ( self : str,_A : List[str],_A : Optional[int],_A : Optional[Any] ):
"""simple docstring"""
if node.start == i and node.end == j:
return node.val
if i <= node.mid:
if j <= node.mid:
# range in left child tree
return self._query_range(node.left,_A,_A )
else:
# range in left child tree and right child tree
return self.fn(
self._query_range(node.left,_A,node.mid ),self._query_range(node.right,node.mid + 1,_A ),)
else:
# range in right child tree
return self._query_range(node.right,_A,_A )
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
if self.root is not None:
SCREAMING_SNAKE_CASE_ : int = Queue()
queue.put(self.root )
while not queue.empty():
SCREAMING_SNAKE_CASE_ : Tuple = queue.get()
yield node
if node.left is not None:
queue.put(node.left )
if node.right is not None:
queue.put(node.right )
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('''*''' * 50)
__lowerCamelCase : int = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
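# Complexity note: _build_tree is O(n); update descends one root-to-leaf path
# and query_range splits at node.mid, so both run in O(log n) per call.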
| 18 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
__lowerCamelCase : Tuple = logging.get_logger(__name__)
__lowerCamelCase : Optional[Any] = {
'''openai/whisper-base''': '''https://huggingface.co/openai/whisper-base/resolve/main/config.json''',
}
# fmt: off
__lowerCamelCase : List[str] = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 3_57, 3_66, 4_38, 5_32, 6_85,
7_05, 7_96, 9_30, 10_58, 12_20, 12_67, 12_79, 13_03, 13_43, 13_77,
13_91, 16_35, 17_82, 18_75, 21_62, 23_61, 24_88, 34_67, 40_08, 42_11,
46_00, 48_08, 52_99, 58_55, 63_29, 72_03, 96_09, 99_59, 1_05_63, 1_07_86,
1_14_20, 1_17_09, 1_19_07, 1_31_63, 1_36_97, 1_37_00, 1_48_08, 1_53_06, 1_64_10, 1_67_91,
1_79_92, 1_92_03, 1_95_10, 2_07_24, 2_23_05, 2_29_35, 2_70_07, 3_01_09, 3_04_20, 3_34_09,
3_49_49, 4_02_83, 4_04_93, 4_05_49, 4_72_82, 4_91_46, 5_02_57, 5_03_59, 5_03_60, 5_03_61
]
__lowerCamelCase : Union[str, Any] = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 3_59, 5_03, 5_22, 5_42, 8_73,
8_93, 9_02, 9_18, 9_22, 9_31, 13_50, 18_53, 19_82, 24_60, 26_27,
32_46, 32_53, 32_68, 35_36, 38_46, 39_61, 41_83, 46_67, 65_85, 66_47,
72_73, 90_61, 93_83, 1_04_28, 1_09_29, 1_19_38, 1_20_33, 1_23_31, 1_25_62, 1_37_93,
1_41_57, 1_46_35, 1_52_65, 1_56_18, 1_65_53, 1_66_04, 1_83_62, 1_89_56, 2_00_75, 2_16_75,
2_25_20, 2_61_30, 2_61_61, 2_64_35, 2_82_79, 2_94_64, 3_16_50, 3_23_02, 3_24_70, 3_68_65,
4_28_63, 4_74_25, 4_98_70, 5_02_54, 5_02_58, 5_03_60, 5_03_61, 5_03_62
]
class a__ ( A__ ):
A = 'whisper'
A = ['past_key_values']
A = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self : Optional[Any],_A : Any=5_1865,_A : Optional[int]=80,_A : List[str]=6,_A : List[str]=4,_A : List[Any]=6,_A : Tuple=4,_A : Any=1536,_A : List[str]=1536,_A : Union[str, Any]=0.0,_A : Dict=0.0,_A : str=5_0257,_A : Optional[int]=True,_A : Optional[Any]=True,_A : Union[str, Any]="gelu",_A : List[str]=256,_A : Union[str, Any]=0.0,_A : Union[str, Any]=0.0,_A : Union[str, Any]=0.0,_A : Union[str, Any]=0.02,_A : int=False,_A : Tuple=1500,_A : Optional[Any]=448,_A : List[Any]=5_0256,_A : Tuple=5_0256,_A : Dict=5_0256,_A : Dict=None,_A : Union[str, Any]=[220, 5_0256],_A : Optional[int]=False,_A : int=256,_A : str=False,_A : Optional[int]=0.05,_A : List[Any]=10,_A : Dict=2,_A : str=0.0,_A : Union[str, Any]=10,_A : Optional[int]=0,_A : List[Any]=7,**_A : int,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = vocab_size
SCREAMING_SNAKE_CASE_ : Any = num_mel_bins
SCREAMING_SNAKE_CASE_ : Dict = d_model
SCREAMING_SNAKE_CASE_ : Optional[int] = encoder_layers
SCREAMING_SNAKE_CASE_ : Optional[int] = encoder_attention_heads
SCREAMING_SNAKE_CASE_ : Dict = decoder_layers
SCREAMING_SNAKE_CASE_ : int = decoder_attention_heads
SCREAMING_SNAKE_CASE_ : List[Any] = decoder_ffn_dim
SCREAMING_SNAKE_CASE_ : List[Any] = encoder_ffn_dim
SCREAMING_SNAKE_CASE_ : Tuple = dropout
SCREAMING_SNAKE_CASE_ : Optional[int] = attention_dropout
SCREAMING_SNAKE_CASE_ : Dict = activation_dropout
SCREAMING_SNAKE_CASE_ : Union[str, Any] = activation_function
SCREAMING_SNAKE_CASE_ : Union[str, Any] = init_std
SCREAMING_SNAKE_CASE_ : List[Any] = encoder_layerdrop
SCREAMING_SNAKE_CASE_ : Optional[Any] = decoder_layerdrop
SCREAMING_SNAKE_CASE_ : Optional[Any] = use_cache
SCREAMING_SNAKE_CASE_ : List[Any] = encoder_layers
SCREAMING_SNAKE_CASE_ : Any = scale_embedding # scale factor will be sqrt(d_model) if True
SCREAMING_SNAKE_CASE_ : int = max_source_positions
SCREAMING_SNAKE_CASE_ : Any = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
SCREAMING_SNAKE_CASE_ : List[str] = classifier_proj_size
SCREAMING_SNAKE_CASE_ : Tuple = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
SCREAMING_SNAKE_CASE_ : List[Any] = apply_spec_augment
SCREAMING_SNAKE_CASE_ : Tuple = mask_time_prob
SCREAMING_SNAKE_CASE_ : List[str] = mask_time_length
SCREAMING_SNAKE_CASE_ : Optional[int] = mask_time_min_masks
SCREAMING_SNAKE_CASE_ : List[Any] = mask_feature_prob
SCREAMING_SNAKE_CASE_ : str = mask_feature_length
SCREAMING_SNAKE_CASE_ : Dict = mask_feature_min_masks
SCREAMING_SNAKE_CASE_ : Dict = median_filter_width
super().__init__(
pad_token_id=_A,bos_token_id=_A,eos_token_id=_A,is_encoder_decoder=_A,decoder_start_token_id=_A,suppress_tokens=_A,begin_suppress_tokens=_A,**_A,)
class a__ ( A__ ):
@property
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = OrderedDict(
[
("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
] )
if self.use_past:
SCREAMING_SNAKE_CASE_ : str = {0: "batch"}
else:
SCREAMING_SNAKE_CASE_ : Optional[int] = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(_A,direction="inputs" )
return common_inputs
def __UpperCamelCase ( self : Any,_A : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],_A : int = -1,_A : int = -1,_A : bool = False,_A : Optional["TensorType"] = None,_A : int = 2_2050,_A : float = 5.0,_A : int = 220,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = OrderedDict()
SCREAMING_SNAKE_CASE_ : Tuple = OnnxConfig.generate_dummy_inputs(
self,preprocessor=preprocessor.feature_extractor,batch_size=_A,framework=_A,sampling_rate=_A,time_duration=_A,frequency=_A,)
SCREAMING_SNAKE_CASE_ : Optional[int] = encoder_inputs["input_features"].shape[2]
SCREAMING_SNAKE_CASE_ : int = encoder_sequence_length // 2 if self.use_past else seq_length
SCREAMING_SNAKE_CASE_ : Dict = super().generate_dummy_inputs(
preprocessor.tokenizer,_A,_A,_A,_A )
SCREAMING_SNAKE_CASE_ : Optional[Any] = encoder_inputs.pop("input_features" )
SCREAMING_SNAKE_CASE_ : List[str] = decoder_inputs.pop("decoder_input_ids" )
if "past_key_values" in decoder_inputs:
SCREAMING_SNAKE_CASE_ : Any = decoder_inputs.pop("past_key_values" )
return dummy_inputs
@property
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
return 1E-3
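# Minimal usage sketch (values mirror the defaults above):
#   config = WhisperConfig()        # 51865-token vocab, 80 mel bins
#   config.max_source_positions     # 1500 encoder frames
#   config.max_target_positions     # 448 decoder tokens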
| 18 | def _snake_case ( lowerCAmelCase : int , lowerCAmelCase : int ):
"""simple docstring"""
while b:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = b, a % b
return a
def _snake_case ( lowerCAmelCase : int , lowerCAmelCase : int ):
"""simple docstring"""
return a if b == 0 else euclidean_gcd_recursive(lowerCAmelCase , a % b )
def _snake_case ( ):
"""simple docstring"""
print(f'euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}' )
print(f'euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}' )
print(f'euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}' )
print(f'euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}' )
print(f'euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}' )
print(f'euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}' )
print(f'euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}' )
print(f'euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}' )
print(f'euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}' )
print(f'euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}' )
if __name__ == "__main__":
main()
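# Both variants rely on Euclid's identity gcd(a, b) == gcd(b, a % b), with
# gcd(a, 0) == a as the terminating case, e.g. gcd(6, 3): (6, 3) -> (3, 0) -> 3.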
| 18 | 1 |
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
__lowerCamelCase : Tuple = NewType('''DataClass''', Any)
__lowerCamelCase : Tuple = NewType('''DataClassType''', Any)
def _snake_case ( lowerCAmelCase : List[Any] ):
"""simple docstring"""
if isinstance(lowerCAmelCase , lowerCAmelCase ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
f'Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).' )
def _snake_case ( lowerCAmelCase : list ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = {str(lowerCAmelCase ): choice for choice in choices}
return lambda lowerCAmelCase : str_to_choice.get(lowerCAmelCase , lowerCAmelCase )
def _snake_case ( *,
lowerCAmelCase : Union[str, List[str]] = None , lowerCAmelCase : str = None , lowerCAmelCase : Any = dataclasses.MISSING , lowerCAmelCase : Callable[[], Any] = dataclasses.MISSING , lowerCAmelCase : dict = None , **lowerCAmelCase : Dict , ):
"""simple docstring"""
if metadata is None:
# Important, don't use as default param in function signature because dict is mutable and shared across function calls
SCREAMING_SNAKE_CASE_ : Optional[int] = {}
if aliases is not None:
SCREAMING_SNAKE_CASE_ : Dict = aliases
if help is not None:
SCREAMING_SNAKE_CASE_ : int = help
return dataclasses.field(metadata=lowerCAmelCase , default=lowerCAmelCase , default_factory=lowerCAmelCase , **lowerCAmelCase )
class a__ ( A__ ):
A = 42
def __init__( self : str,_A : Union[DataClassType, Iterable[DataClassType]],**_A : Union[str, Any] ):
"""simple docstring"""
if "formatter_class" not in kwargs:
SCREAMING_SNAKE_CASE_ : List[str] = ArgumentDefaultsHelpFormatter
super().__init__(**_A )
if dataclasses.is_dataclass(_A ):
SCREAMING_SNAKE_CASE_ : Tuple = [dataclass_types]
SCREAMING_SNAKE_CASE_ : int = list(_A )
for dtype in self.dataclass_types:
self._add_dataclass_arguments(_A )
@staticmethod
def __UpperCamelCase ( _A : ArgumentParser,_A : dataclasses.Field ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = F'--{field.name}'
SCREAMING_SNAKE_CASE_ : Dict = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type,_A ):
raise RuntimeError(
"Unresolved type detected, which should have been done with the help of "
"`typing.get_type_hints` method by default" )
SCREAMING_SNAKE_CASE_ : Optional[Any] = kwargs.pop("aliases",[] )
if isinstance(_A,_A ):
SCREAMING_SNAKE_CASE_ : str = [aliases]
SCREAMING_SNAKE_CASE_ : str = getattr(field.type,"__origin__",field.type )
if origin_type is Union or (hasattr(_A,"UnionType" ) and isinstance(_A,types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(_A ) not in field.type.__args__
):
raise ValueError(
"Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
" the argument parser only supports one type per argument."
F' Problem encountered in field \'{field.name}\'.' )
if type(_A ) not in field.type.__args__:
# filter `str` in Union
SCREAMING_SNAKE_CASE_ : Any = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
SCREAMING_SNAKE_CASE_ : Tuple = getattr(field.type,"__origin__",field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
SCREAMING_SNAKE_CASE_ : int = (
field.type.__args__[0] if isinstance(_A,field.type.__args__[1] ) else field.type.__args__[1]
)
SCREAMING_SNAKE_CASE_ : Dict = getattr(field.type,"__origin__",field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
SCREAMING_SNAKE_CASE_ : Tuple = {}
if origin_type is Literal or (isinstance(field.type,_A ) and issubclass(field.type,_A )):
if origin_type is Literal:
SCREAMING_SNAKE_CASE_ : Tuple = field.type.__args__
else:
SCREAMING_SNAKE_CASE_ : Any = [x.value for x in field.type]
SCREAMING_SNAKE_CASE_ : Tuple = make_choice_type_function(kwargs["choices"] )
if field.default is not dataclasses.MISSING:
SCREAMING_SNAKE_CASE_ : Dict = field.default
else:
SCREAMING_SNAKE_CASE_ : Optional[Any] = True
elif field.type is bool or field.type == Optional[bool]:
# Copy the correct kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
SCREAMING_SNAKE_CASE_ : Optional[Any] = copy(_A )
# Hack because type=bool in argparse does not behave as we want.
SCREAMING_SNAKE_CASE_ : int = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is False if we have no default when of type bool.
SCREAMING_SNAKE_CASE_ : List[Any] = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
SCREAMING_SNAKE_CASE_ : Union[str, Any] = default
# This tells argparse we accept 0 or 1 value after --field_name
SCREAMING_SNAKE_CASE_ : str = "?"
# This is the value that will get picked if we do --field_name (without value)
SCREAMING_SNAKE_CASE_ : int = True
elif isclass(_A ) and issubclass(_A,_A ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = field.type.__args__[0]
SCREAMING_SNAKE_CASE_ : Optional[int] = "+"
if field.default_factory is not dataclasses.MISSING:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = field.default_factory()
elif field.default is dataclasses.MISSING:
SCREAMING_SNAKE_CASE_ : str = True
else:
SCREAMING_SNAKE_CASE_ : Dict = field.type
if field.default is not dataclasses.MISSING:
SCREAMING_SNAKE_CASE_ : Dict = field.default
elif field.default_factory is not dataclasses.MISSING:
SCREAMING_SNAKE_CASE_ : Optional[Any] = field.default_factory()
else:
SCREAMING_SNAKE_CASE_ : List[str] = True
parser.add_argument(_A,*_A,**_A )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
SCREAMING_SNAKE_CASE_ : Dict = False
parser.add_argument(F'--no_{field.name}',action="store_false",dest=field.name,**_A )
def __UpperCamelCase ( self : str,_A : DataClassType ):
"""simple docstring"""
if hasattr(_A,"_argument_group_name" ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.add_argument_group(dtype._argument_group_name )
else:
SCREAMING_SNAKE_CASE_ : Optional[int] = self
try:
SCREAMING_SNAKE_CASE_ : Dict[str, type] = get_type_hints(_A )
except NameError:
raise RuntimeError(
F'Type resolution failed for {dtype}. Try declaring the class in global scope or '
"removing line of `from __future__ import annotations` which opts in Postponed "
"Evaluation of Annotations (PEP 563)" )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(_A ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = ".".join(map(_A,sys.version_info[:3] ) )
raise RuntimeError(
F'Type resolution failed for {dtype} on Python {python_version}. Try removing '
"line of `from __future__ import annotations` which opts in union types as "
"`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
"support Python versions that lower than 3.10, you need to use "
"`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
"`X | None`." ) from ex
raise
for field in dataclasses.fields(_A ):
if not field.init:
continue
SCREAMING_SNAKE_CASE_ : int = type_hints[field.name]
self._parse_dataclass_field(_A,_A )
def __UpperCamelCase ( self : Any,_A : Union[str, Any]=None,_A : str=False,_A : str=True,_A : List[str]=None,_A : Any=None,):
"""simple docstring"""
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
SCREAMING_SNAKE_CASE_ : Tuple = []
if args_filename:
args_files.append(Path(_A ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix(".args" ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
SCREAMING_SNAKE_CASE_ : Optional[Any] = ArgumentParser()
args_file_parser.add_argument(_A,type=_A,action="append" )
# Use only remaining args for further parsing (remove the args_file_flag)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = args_file_parser.parse_known_args(args=_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = vars(_A ).get(args_file_flag.lstrip("-" ),_A )
if cmd_args_file_paths:
args_files.extend([Path(_A ) for p in cmd_args_file_paths] )
SCREAMING_SNAKE_CASE_ : Any = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
SCREAMING_SNAKE_CASE_ : Union[str, Any] = file_args + args if args is not None else file_args + sys.argv[1:]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = self.parse_known_args(args=_A )
SCREAMING_SNAKE_CASE_ : Tuple = []
for dtype in self.dataclass_types:
SCREAMING_SNAKE_CASE_ : List[Any] = {f.name for f in dataclasses.fields(_A ) if f.init}
SCREAMING_SNAKE_CASE_ : Optional[Any] = {k: v for k, v in vars(_A ).items() if k in keys}
for k in keys:
delattr(_A,_A )
SCREAMING_SNAKE_CASE_ : List[str] = dtype(**_A )
outputs.append(_A )
if len(namespace.__dict__ ) > 0:
# additional namespace.
outputs.append(_A )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(F'Some specified arguments are not used by the HfArgumentParser: {remaining_args}' )
return (*outputs,)
def __UpperCamelCase ( self : Any,_A : Dict[str, Any],_A : bool = False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = set(args.keys() )
SCREAMING_SNAKE_CASE_ : Tuple = []
for dtype in self.dataclass_types:
SCREAMING_SNAKE_CASE_ : List[str] = {f.name for f in dataclasses.fields(_A ) if f.init}
SCREAMING_SNAKE_CASE_ : Tuple = {k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys() )
SCREAMING_SNAKE_CASE_ : Dict = dtype(**_A )
outputs.append(_A )
if not allow_extra_keys and unused_keys:
raise ValueError(F'Some keys are not used by the HfArgumentParser: {sorted(_A )}' )
return tuple(_A )
def __UpperCamelCase ( self : int,_A : str,_A : bool = False ):
"""simple docstring"""
with open(Path(_A ),encoding="utf-8" ) as open_json_file:
SCREAMING_SNAKE_CASE_ : Any = json.loads(open_json_file.read() )
SCREAMING_SNAKE_CASE_ : List[str] = self.parse_dict(_A,allow_extra_keys=_A )
return tuple(_A )
def __UpperCamelCase ( self : str,_A : str,_A : bool = False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = self.parse_dict(yaml.safe_load(Path(_A ).read_text() ),allow_extra_keys=_A )
return tuple(_A )
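# Hedged usage sketch (the dataclass fields are illustrative; method names
# follow the upstream HfArgumentParser API):
#   @dataclasses.dataclass
#   class TrainArgs:
#       learning_rate: float = field(default=1e-4, help="Peak learning rate")
#       fp16: bool = False
#   parser = HfArgumentParser(TrainArgs)
#   (train_args,) = parser.parse_args_into_dataclasses()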
| 18 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__lowerCamelCase : Dict = {'''configuration_vit_mae''': ['''VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMAEConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Any = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Optional[Any] = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
__lowerCamelCase : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 18 | 1 |
from collections import defaultdict
def _snake_case ( lowerCAmelCase : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = 1
SCREAMING_SNAKE_CASE_ : Tuple = True
for v in tree[start]:
if v not in visited:
ret += dfs(lowerCAmelCase )
if ret % 2 == 0:
cuts.append(lowerCAmelCase )
return ret
def _snake_case ( ):
"""simple docstring"""
dfs(1 )
if __name__ == "__main__":
__lowerCamelCase , __lowerCamelCase : Union[str, Any] = 10, 9
__lowerCamelCase : Optional[int] = defaultdict(list)
__lowerCamelCase : dict[int, bool] = {}
__lowerCamelCase : list[int] = []
__lowerCamelCase : Optional[Any] = 0
__lowerCamelCase : Any = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
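# Intuition: dfs() returns the size of each subtree; whenever that size is
# even, the edge above the current node can be removed, so the node is added
# to `cuts`. The root's subtree (the whole tree) always counts, hence the
# final `len(cuts) - 1`.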
| 18 | import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate ( model_type : str , generator_name_or_path : str , question_encoder_name_or_path : str , dest_dir : Path , config_name_or_path : str = None , generator_tokenizer_name_or_path : str = None , question_encoder_tokenizer_name_or_path : str = None , ):
    """simple docstring"""
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path
    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration
    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path )
    gen_config = AutoConfig.from_pretrained(generator_name_or_path )
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path )
    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config
    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path , generator_name_or_path , config=rag_config )
    rag_model.save_pretrained(dest_dir )
    # Sanity check.
    model_class.from_pretrained(dest_dir )
    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path )
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/" )
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path )
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''',
choices=['''rag_sequence''', '''rag_token'''],
required=True,
type=str,
help='''RAG model type: rag_sequence, rag_token''',
)
parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''')
parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''')
parser.add_argument(
'''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier'''
)
parser.add_argument(
'''--generator_tokenizer_name_or_path''',
type=str,
help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''',
)
parser.add_argument(
'''--question_encoder_tokenizer_name_or_path''',
type=str,
help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''',
)
parser.add_argument(
'''--config_name_or_path''',
type=str,
help=(
'''Identifier of the model config to use, if not provided, resolves to a base config for a given'''
''' ``model_type``'''
),
)
    args = parser.parse_args()
    dest_dir = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
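# --- Illustrative usage (added; the script filename and model identifiers
# below are examples, not taken from the record above) ---
# A typical invocation that consolidates a DPR question encoder and a BART
# generator into one RAG checkpoint might look like:
#
#   python consolidate_rag_checkpoint.py \
#       --model_type rag_sequence \
#       --generator_name_or_path facebook/bart-large-cnn \
#       --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#       --dest ./rag-sequence-consolidated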
| 18 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'''configuration_vit_mae''': ['''VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMAEConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_vit_mae'''] = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_vit_mae'''] = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 18 | import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class a__ ( unittest.TestCase ):
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = {
"task_specific_params": {
"summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
"summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
"summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
}
}
SCREAMING_SNAKE_CASE_ : Any = {
"task_specific_params.summarization.length_penalty": 1.0,
"task_specific_params.summarization.max_length": 128,
"task_specific_params.summarization.min_length": 12,
"task_specific_params.summarization.num_beams": 4,
"task_specific_params.summarization_cnn.length_penalty": 2.0,
"task_specific_params.summarization_cnn.max_length": 142,
"task_specific_params.summarization_cnn.min_length": 56,
"task_specific_params.summarization_cnn.num_beams": 4,
"task_specific_params.summarization_xsum.length_penalty": 1.0,
"task_specific_params.summarization_xsum.max_length": 62,
"task_specific_params.summarization_xsum.min_length": 11,
"task_specific_params.summarization_xsum.num_beams": 6,
}
self.assertEqual(flatten_dict(_A ),_A )
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = np.random.randn(3,4 )
self.assertTrue(np.allclose(transpose(_A ),x.transpose() ) )
SCREAMING_SNAKE_CASE_ : Tuple = np.random.randn(3,4,5 )
self.assertTrue(np.allclose(transpose(_A,axes=(1, 2, 0) ),x.transpose((1, 2, 0) ) ) )
@require_torch
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : Any = torch.tensor(_A )
self.assertTrue(np.allclose(transpose(_A ),transpose(_A ).numpy() ) )
SCREAMING_SNAKE_CASE_ : Dict = np.random.randn(3,4,5 )
SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor(_A )
self.assertTrue(np.allclose(transpose(_A,axes=(1, 2, 0) ),transpose(_A,axes=(1, 2, 0) ).numpy() ) )
@require_tf
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = tf.constant(_A )
self.assertTrue(np.allclose(transpose(_A ),transpose(_A ).numpy() ) )
SCREAMING_SNAKE_CASE_ : List[str] = np.random.randn(3,4,5 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.constant(_A )
self.assertTrue(np.allclose(transpose(_A,axes=(1, 2, 0) ),transpose(_A,axes=(1, 2, 0) ).numpy() ) )
@require_flax
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : Tuple = jnp.array(_A )
self.assertTrue(np.allclose(transpose(_A ),np.asarray(transpose(_A ) ) ) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.random.randn(3,4,5 )
SCREAMING_SNAKE_CASE_ : List[Any] = jnp.array(_A )
self.assertTrue(np.allclose(transpose(_A,axes=(1, 2, 0) ),np.asarray(transpose(_A,axes=(1, 2, 0) ) ) ) )
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = np.random.randn(3,4 )
self.assertTrue(np.allclose(reshape(_A,(4, 3) ),np.reshape(_A,(4, 3) ) ) )
SCREAMING_SNAKE_CASE_ : Any = np.random.randn(3,4,5 )
self.assertTrue(np.allclose(reshape(_A,(12, 5) ),np.reshape(_A,(12, 5) ) ) )
@require_torch
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor(_A )
self.assertTrue(np.allclose(reshape(_A,(4, 3) ),reshape(_A,(4, 3) ).numpy() ) )
SCREAMING_SNAKE_CASE_ : Dict = np.random.randn(3,4,5 )
SCREAMING_SNAKE_CASE_ : int = torch.tensor(_A )
self.assertTrue(np.allclose(reshape(_A,(12, 5) ),reshape(_A,(12, 5) ).numpy() ) )
@require_tf
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = tf.constant(_A )
self.assertTrue(np.allclose(reshape(_A,(4, 3) ),reshape(_A,(4, 3) ).numpy() ) )
SCREAMING_SNAKE_CASE_ : int = np.random.randn(3,4,5 )
SCREAMING_SNAKE_CASE_ : Any = tf.constant(_A )
self.assertTrue(np.allclose(reshape(_A,(12, 5) ),reshape(_A,(12, 5) ).numpy() ) )
@require_flax
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : int = jnp.array(_A )
self.assertTrue(np.allclose(reshape(_A,(4, 3) ),np.asarray(reshape(_A,(4, 3) ) ) ) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.random.randn(3,4,5 )
SCREAMING_SNAKE_CASE_ : Tuple = jnp.array(_A )
self.assertTrue(np.allclose(reshape(_A,(12, 5) ),np.asarray(reshape(_A,(12, 5) ) ) ) )
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = np.random.randn(1,3,4 )
self.assertTrue(np.allclose(squeeze(_A ),np.squeeze(_A ) ) )
SCREAMING_SNAKE_CASE_ : Any = np.random.randn(1,4,1,5 )
self.assertTrue(np.allclose(squeeze(_A,axis=2 ),np.squeeze(_A,axis=2 ) ) )
@require_torch
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = np.random.randn(1,3,4 )
SCREAMING_SNAKE_CASE_ : Any = torch.tensor(_A )
self.assertTrue(np.allclose(squeeze(_A ),squeeze(_A ).numpy() ) )
SCREAMING_SNAKE_CASE_ : Any = np.random.randn(1,4,1,5 )
SCREAMING_SNAKE_CASE_ : Dict = torch.tensor(_A )
self.assertTrue(np.allclose(squeeze(_A,axis=2 ),squeeze(_A,axis=2 ).numpy() ) )
@require_tf
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = np.random.randn(1,3,4 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.constant(_A )
self.assertTrue(np.allclose(squeeze(_A ),squeeze(_A ).numpy() ) )
SCREAMING_SNAKE_CASE_ : Any = np.random.randn(1,4,1,5 )
SCREAMING_SNAKE_CASE_ : Optional[int] = tf.constant(_A )
self.assertTrue(np.allclose(squeeze(_A,axis=2 ),squeeze(_A,axis=2 ).numpy() ) )
@require_flax
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = np.random.randn(1,3,4 )
SCREAMING_SNAKE_CASE_ : List[str] = jnp.array(_A )
self.assertTrue(np.allclose(squeeze(_A ),np.asarray(squeeze(_A ) ) ) )
SCREAMING_SNAKE_CASE_ : str = np.random.randn(1,4,1,5 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = jnp.array(_A )
self.assertTrue(np.allclose(squeeze(_A,axis=2 ),np.asarray(squeeze(_A,axis=2 ) ) ) )
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = np.random.randn(3,4 )
self.assertTrue(np.allclose(expand_dims(_A,axis=1 ),np.expand_dims(_A,axis=1 ) ) )
@require_torch
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor(_A )
self.assertTrue(np.allclose(expand_dims(_A,axis=1 ),expand_dims(_A,axis=1 ).numpy() ) )
@require_tf
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : Optional[int] = tf.constant(_A )
self.assertTrue(np.allclose(expand_dims(_A,axis=1 ),expand_dims(_A,axis=1 ).numpy() ) )
@require_flax
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = jnp.array(_A )
self.assertTrue(np.allclose(expand_dims(_A,axis=1 ),np.asarray(expand_dims(_A,axis=1 ) ) ) )
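# --- Illustrative sketch (added; simplified relative to transformers.utils) ---
# Every test above follows one pattern: call the framework-agnostic helper
# (transpose / reshape / squeeze / expand_dims) and compare it with the plain
# numpy result. A minimal dispatch-on-type transpose, assuming only numpy is
# installed, could look like:
import numpy as np

def agnostic_transpose(array, axes=None):
    if isinstance(array, np.ndarray):
        return np.transpose(array, axes=axes)
    # torch / tf / jax branches would key on the tensor's type here
    raise TypeError(f"Unsupported type for transpose: {type(array)}")

_x = np.arange(6).reshape(2, 3)
assert np.allclose(agnostic_transpose(_x), _x.T)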
| 18 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a__ ( A__ , unittest.TestCase ):
A = KandinskyVaaControlnetImgaImgPipeline
A = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
A = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
A = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
A = False
@property
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
return 32
@property
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
return 32
@property
def __UpperCamelCase ( self : int ):
"""simple docstring"""
return self.time_input_dim
@property
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
return 100
@property
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : List[str] = {
"in_channels": 8,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "image_hint",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
SCREAMING_SNAKE_CASE_ : Optional[int] = UNetaDConditionModel(**_A )
return model
@property
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.dummy_unet
SCREAMING_SNAKE_CASE_ : Dict = self.dummy_movq
SCREAMING_SNAKE_CASE_ : List[str] = {
"num_train_timesteps": 1000,
"beta_schedule": "linear",
"beta_start": 0.00085,
"beta_end": 0.012,
"clip_sample": False,
"set_alpha_to_one": False,
"steps_offset": 0,
"prediction_type": "epsilon",
"thresholding": False,
}
SCREAMING_SNAKE_CASE_ : Optional[Any] = DDIMScheduler(**_A )
SCREAMING_SNAKE_CASE_ : Optional[Any] = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def __UpperCamelCase ( self : Dict,_A : Any,_A : str=0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = floats_tensor((1, self.text_embedder_hidden_size),rng=random.Random(_A ) ).to(_A )
SCREAMING_SNAKE_CASE_ : str = floats_tensor((1, self.text_embedder_hidden_size),rng=random.Random(seed + 1 ) ).to(
_A )
# create init_image
SCREAMING_SNAKE_CASE_ : Union[str, Any] = floats_tensor((1, 3, 64, 64),rng=random.Random(_A ) ).to(_A )
SCREAMING_SNAKE_CASE_ : Tuple = image.cpu().permute(0,2,3,1 )[0]
SCREAMING_SNAKE_CASE_ : Any = Image.fromarray(np.uinta(_A ) ).convert("RGB" ).resize((256, 256) )
# create hint
SCREAMING_SNAKE_CASE_ : str = floats_tensor((1, 3, 64, 64),rng=random.Random(_A ) ).to(_A )
if str(_A ).startswith("mps" ):
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.manual_seed(_A )
else:
SCREAMING_SNAKE_CASE_ : str = torch.Generator(device=_A ).manual_seed(_A )
SCREAMING_SNAKE_CASE_ : Tuple = {
"image": init_image,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"hint": hint,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 10,
"guidance_scale": 7.0,
"strength": 0.2,
"output_type": "np",
}
return inputs
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = "cpu"
SCREAMING_SNAKE_CASE_ : Dict = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : List[Any] = self.pipeline_class(**_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = pipe(**self.get_dummy_inputs(_A ) )
SCREAMING_SNAKE_CASE_ : Tuple = output.images
SCREAMING_SNAKE_CASE_ : int = pipe(
**self.get_dummy_inputs(_A ),return_dict=_A,)[0]
SCREAMING_SNAKE_CASE_ : Tuple = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_ : str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE_ : Dict = np.array(
[0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
def __UpperCamelCase ( self : int ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy" )
SCREAMING_SNAKE_CASE_ : Any = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
SCREAMING_SNAKE_CASE_ : int = init_image.resize((512, 512) )
SCREAMING_SNAKE_CASE_ : List[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/hint_image_cat.png" )
SCREAMING_SNAKE_CASE_ : Tuple = torch.from_numpy(np.array(_A ) ).float() / 255.0
SCREAMING_SNAKE_CASE_ : str = hint.permute(2,0,1 ).unsqueeze(0 )
SCREAMING_SNAKE_CASE_ : int = "A robot, 4k photo"
SCREAMING_SNAKE_CASE_ : List[Any] = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior",torch_dtype=torch.floataa )
pipe_prior.to(_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-controlnet-depth",torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = pipeline.to(_A )
pipeline.set_progress_bar_config(disable=_A )
SCREAMING_SNAKE_CASE_ : Dict = torch.Generator(device="cpu" ).manual_seed(0 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = pipe_prior(
_A,image=_A,strength=0.85,generator=_A,negative_prompt="",).to_tuple()
SCREAMING_SNAKE_CASE_ : str = pipeline(
image=_A,image_embeds=_A,negative_image_embeds=_A,hint=_A,generator=_A,num_inference_steps=100,height=512,width=512,strength=0.5,output_type="np",)
SCREAMING_SNAKE_CASE_ : Tuple = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(_A,_A )
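# --- Illustrative sketch (added) ---
# get_dummy_inputs above guards its torch.Generator creation: "mps" devices
# fall back to the global CPU generator, every other device seeds in place.
# The same guard in isolation, assuming torch is importable:
import torch

def make_generator(device: str, seed: int) -> torch.Generator:
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)  # returns the default CPU generator
    return torch.Generator(device=device).manual_seed(seed)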
| 18 | import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__lowerCamelCase : List[str] = logging.get_logger(__name__)
__lowerCamelCase : Tuple = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
__lowerCamelCase : List[Any] = {
'''vocab_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
},
}
__lowerCamelCase : int = {
'''allenai/longformer-base-4096''': 40_96,
'''allenai/longformer-large-4096''': 40_96,
'''allenai/longformer-large-4096-finetuned-triviaqa''': 40_96,
'''allenai/longformer-base-4096-extra.pos.embd.only''': 40_96,
'''allenai/longformer-large-4096-extra.pos.embd.only''': 40_96,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def _snake_case ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
SCREAMING_SNAKE_CASE_ : str = bs[:]
SCREAMING_SNAKE_CASE_ : Optional[int] = 0
for b in range(2**8 ):
if b not in bs:
bs.append(lowerCAmelCase )
cs.append(2**8 + n )
n += 1
SCREAMING_SNAKE_CASE_ : List[str] = [chr(lowerCAmelCase ) for n in cs]
return dict(zip(lowerCAmelCase , lowerCAmelCase ) )
def _snake_case ( lowerCAmelCase : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = set()
SCREAMING_SNAKE_CASE_ : Tuple = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
SCREAMING_SNAKE_CASE_ : List[str] = char
return pairs
class a__ ( A__ ):
A = VOCAB_FILES_NAMES
A = PRETRAINED_VOCAB_FILES_MAP
A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A = ['input_ids', 'attention_mask']
def __init__( self : Union[str, Any],_A : List[Any],_A : Tuple,_A : str="replace",_A : Optional[int]="<s>",_A : Dict="</s>",_A : Any="</s>",_A : Optional[Any]="<s>",_A : Union[str, Any]="<unk>",_A : int="<pad>",_A : Dict="<mask>",_A : int=False,**_A : Dict,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else bos_token
SCREAMING_SNAKE_CASE_ : Optional[int] = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else eos_token
SCREAMING_SNAKE_CASE_ : str = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else sep_token
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else cls_token
SCREAMING_SNAKE_CASE_ : List[str] = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else unk_token
SCREAMING_SNAKE_CASE_ : Optional[Any] = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE_ : Dict = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else mask_token
super().__init__(
errors=_A,bos_token=_A,eos_token=_A,unk_token=_A,sep_token=_A,cls_token=_A,pad_token=_A,mask_token=_A,add_prefix_space=_A,**_A,)
with open(_A,encoding="utf-8" ) as vocab_handle:
SCREAMING_SNAKE_CASE_ : Tuple = json.load(_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE_ : Any = errors # how to handle errors in decoding
SCREAMING_SNAKE_CASE_ : Optional[Any] = bytes_to_unicode()
SCREAMING_SNAKE_CASE_ : str = {v: k for k, v in self.byte_encoder.items()}
with open(_A,encoding="utf-8" ) as merges_handle:
SCREAMING_SNAKE_CASE_ : int = merges_handle.read().split("\n" )[1:-1]
SCREAMING_SNAKE_CASE_ : List[str] = [tuple(merge.split() ) for merge in bpe_merges]
SCREAMING_SNAKE_CASE_ : Optional[int] = dict(zip(_A,range(len(_A ) ) ) )
SCREAMING_SNAKE_CASE_ : Any = {}
SCREAMING_SNAKE_CASE_ : List[str] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
SCREAMING_SNAKE_CASE_ : List[Any] = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
return len(self.encoder )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
return dict(self.encoder,**self.added_tokens_encoder )
def __UpperCamelCase ( self : Any,_A : int ):
"""simple docstring"""
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tuple(_A )
SCREAMING_SNAKE_CASE_ : str = get_pairs(_A )
if not pairs:
return token
while True:
SCREAMING_SNAKE_CASE_ : Tuple = min(_A,key=lambda _A : self.bpe_ranks.get(_A,float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = bigram
SCREAMING_SNAKE_CASE_ : int = []
SCREAMING_SNAKE_CASE_ : Dict = 0
while i < len(_A ):
try:
SCREAMING_SNAKE_CASE_ : Tuple = word.index(_A,_A )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
SCREAMING_SNAKE_CASE_ : str = j
if word[i] == first and i < len(_A ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
SCREAMING_SNAKE_CASE_ : Dict = tuple(_A )
SCREAMING_SNAKE_CASE_ : List[str] = new_word
if len(_A ) == 1:
break
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = get_pairs(_A )
SCREAMING_SNAKE_CASE_ : List[str] = " ".join(_A )
SCREAMING_SNAKE_CASE_ : Any = word
return word
def __UpperCamelCase ( self : Dict,_A : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = []
for token in re.findall(self.pat,_A ):
SCREAMING_SNAKE_CASE_ : Any = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_A ).split(" " ) )
return bpe_tokens
def __UpperCamelCase ( self : Optional[int],_A : str ):
"""simple docstring"""
return self.encoder.get(_A,self.encoder.get(self.unk_token ) )
def __UpperCamelCase ( self : Tuple,_A : str ):
"""simple docstring"""
return self.decoder.get(_A )
def __UpperCamelCase ( self : List[str],_A : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = "".join(_A )
SCREAMING_SNAKE_CASE_ : Tuple = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8",errors=self.errors )
return text
def __UpperCamelCase ( self : List[Any],_A : str,_A : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(_A ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
SCREAMING_SNAKE_CASE_ : Tuple = os.path.join(
_A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
SCREAMING_SNAKE_CASE_ : Any = os.path.join(
_A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(_A,"w",encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder,indent=2,sort_keys=_A,ensure_ascii=_A ) + "\n" )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0
with open(_A,"w",encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items(),key=lambda _A : kv[1] ):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
" Please check that the tokenizer is not corrupted!" )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = token_index
writer.write(" ".join(_A ) + "\n" )
index += 1
return vocab_file, merge_file
def __UpperCamelCase ( self : Optional[Any],_A : List[int],_A : Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : str = [self.cls_token_id]
SCREAMING_SNAKE_CASE_ : Tuple = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __UpperCamelCase ( self : str,_A : List[int],_A : Optional[List[int]] = None,_A : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A,token_ids_a=_A,already_has_special_tokens=_A )
if token_ids_a is None:
return [1] + ([0] * len(_A )) + [1]
return [1] + ([0] * len(_A )) + [1, 1] + ([0] * len(_A )) + [1]
def __UpperCamelCase ( self : Any,_A : List[int],_A : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCamelCase ( self : Any,_A : Union[str, Any],_A : Any=False,**_A : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = kwargs.pop("add_prefix_space",self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_A ) > 0 and not text[0].isspace()):
SCREAMING_SNAKE_CASE_ : str = " " + text
return (text, kwargs)
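# --- Illustrative sketch (added; toy merge-rank table, not the Longformer one) ---
# The bpe() method above repeatedly merges the lowest-ranked adjacent symbol
# pair until no mergeable pair remains. One round of that loop can be
# exercised in isolation:
def merge_once(word: tuple, ranks: dict) -> tuple:
    if len(word) < 2:
        return word
    pairs = {(a, b) for a, b in zip(word, word[1:])}
    best = min(pairs, key=lambda p: ranks.get(p, float("inf")))
    if best not in ranks:
        return word  # no known merge applies
    out, i = [], 0
    while i < len(word):
        if i < len(word) - 1 and (word[i], word[i + 1]) == best:
            out.append(word[i] + word[i + 1])
            i += 2
        else:
            out.append(word[i])
            i += 1
    return tuple(out)

assert merge_once(("l", "o", "w"), {("l", "o"): 0}) == ("lo", "w")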
| 18 | 1 |
def _snake_case ( string : str , separator : str = " " ):
    """simple docstring"""
    split_words = []
    last_index = 0
    for index, char in enumerate(string ):
        if char == separator:
            split_words.append(string[last_index:index] )
            last_index = index + 1
        elif index + 1 == len(string ):
            split_words.append(string[last_index : index + 1] )
    return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
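# --- Illustrative usage (added) ---
# The scan above mirrors str.split with an explicit separator; empty fields
# between consecutive separators are kept (a trailing separator's empty
# field, unlike str.split's, is dropped):
assert _snake_case("a,b,,c", ",") == ["a", "b", "", "c"]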
| 18 | from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class a__ :
def __init__( self : Optional[int],_A : Dict,_A : List[str]=13,_A : List[str]=7,_A : int=True,_A : str=True,_A : Union[str, Any]=True,_A : Tuple=True,_A : Dict=99,_A : Tuple=32,_A : Tuple=2,_A : Tuple=4,_A : Optional[Any]=37,_A : str="gelu",_A : Dict=0.1,_A : List[Any]=0.1,_A : List[str]=512,_A : str=16,_A : int=2,_A : Dict=0.02,_A : List[Any]=3,_A : Optional[Any]=4,_A : Optional[int]=None,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = parent
SCREAMING_SNAKE_CASE_ : Any = 13
SCREAMING_SNAKE_CASE_ : List[str] = 7
SCREAMING_SNAKE_CASE_ : Dict = True
SCREAMING_SNAKE_CASE_ : Optional[Any] = True
SCREAMING_SNAKE_CASE_ : Tuple = True
SCREAMING_SNAKE_CASE_ : List[str] = True
SCREAMING_SNAKE_CASE_ : List[str] = 99
SCREAMING_SNAKE_CASE_ : Tuple = 384
SCREAMING_SNAKE_CASE_ : Optional[Any] = 2
SCREAMING_SNAKE_CASE_ : Any = 4
SCREAMING_SNAKE_CASE_ : str = 37
SCREAMING_SNAKE_CASE_ : Optional[Any] = "gelu"
SCREAMING_SNAKE_CASE_ : List[Any] = 0.1
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0.1
SCREAMING_SNAKE_CASE_ : Dict = 512
SCREAMING_SNAKE_CASE_ : int = 16
SCREAMING_SNAKE_CASE_ : Optional[int] = 2
SCREAMING_SNAKE_CASE_ : Any = 0.02
SCREAMING_SNAKE_CASE_ : str = 3
SCREAMING_SNAKE_CASE_ : int = 4
SCREAMING_SNAKE_CASE_ : Dict = 128
SCREAMING_SNAKE_CASE_ : Any = 2
SCREAMING_SNAKE_CASE_ : Tuple = 9
SCREAMING_SNAKE_CASE_ : List[Any] = 1
SCREAMING_SNAKE_CASE_ : Any = None
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
SCREAMING_SNAKE_CASE_ : Any = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ : List[str] = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE_ : List[Any] = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
SCREAMING_SNAKE_CASE_ : Dict = None
SCREAMING_SNAKE_CASE_ : Dict = None
SCREAMING_SNAKE_CASE_ : str = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size],self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([self.batch_size, self.seq_length],self.num_labels )
SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size],self.num_choices )
SCREAMING_SNAKE_CASE_ : Any = ConvBertConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,initializer_range=self.initializer_range,return_dict=_A,)
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase ( self : Optional[int],_A : List[Any],_A : int,_A : Tuple,_A : Optional[int],_A : Union[str, Any],_A : Union[str, Any],_A : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = TFConvBertModel(config=_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
SCREAMING_SNAKE_CASE_ : str = [input_ids, input_mask]
SCREAMING_SNAKE_CASE_ : List[str] = model(_A )
SCREAMING_SNAKE_CASE_ : Dict = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self : Dict,_A : Dict,_A : int,_A : Union[str, Any],_A : List[Any],_A : int,_A : str,_A : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = TFConvBertForMaskedLM(config=_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE_ : List[Any] = model(_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self : Any,_A : Optional[int],_A : List[Any],_A : Union[str, Any],_A : List[Any],_A : Union[str, Any],_A : Optional[int],_A : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self.num_labels
SCREAMING_SNAKE_CASE_ : Any = TFConvBertForSequenceClassification(config=_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : int,_A : int,_A : Dict,_A : List[str],_A : Tuple,_A : Dict,_A : Optional[int],_A : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.num_choices
SCREAMING_SNAKE_CASE_ : Optional[int] = TFConvBertForMultipleChoice(config=_A )
SCREAMING_SNAKE_CASE_ : Any = tf.tile(tf.expand_dims(_A,1 ),(1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE_ : Any = tf.tile(tf.expand_dims(_A,1 ),(1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.tile(tf.expand_dims(_A,1 ),(1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE_ : int = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
SCREAMING_SNAKE_CASE_ : int = model(_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_choices) )
def __UpperCamelCase ( self : List[Any],_A : Union[str, Any],_A : int,_A : Optional[int],_A : str,_A : str,_A : Tuple,_A : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.num_labels
SCREAMING_SNAKE_CASE_ : Union[str, Any] = TFConvBertForTokenClassification(config=_A )
SCREAMING_SNAKE_CASE_ : Tuple = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE_ : str = model(_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase ( self : List[Any],_A : int,_A : List[str],_A : List[Any],_A : Any,_A : Optional[int],_A : List[str],_A : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = TFConvBertForQuestionAnswering(config=_A )
SCREAMING_SNAKE_CASE_ : Dict = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE_ : Any = model(_A )
self.parent.assertEqual(result.start_logits.shape,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape,(self.batch_size, self.seq_length) )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class a__ ( A__ , A__ , unittest.TestCase ):
A = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
A = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
A = False
A = False
A = False
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = TFConvBertModelTester(self )
SCREAMING_SNAKE_CASE_ : Tuple = ConfigTester(self,config_class=_A,hidden_size=37 )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_A )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_A )
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_A )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_A )
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_A )
@slow
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : List[str] = True
SCREAMING_SNAKE_CASE_ : Any = True
if hasattr(_A,"use_cache" ):
SCREAMING_SNAKE_CASE_ : List[Any] = True
SCREAMING_SNAKE_CASE_ : int = getattr(self.model_tester,"encoder_seq_length",self.model_tester.seq_length )
SCREAMING_SNAKE_CASE_ : Optional[Any] = getattr(self.model_tester,"key_length",_A )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : List[str] = self._prepare_for_class(_A,_A )
SCREAMING_SNAKE_CASE_ : List[Any] = model_class(_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = len(model(_A ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_A,saved_model=_A )
SCREAMING_SNAKE_CASE_ : Optional[Any] = os.path.join(_A,"saved_model","1" )
SCREAMING_SNAKE_CASE_ : Tuple = tf.keras.models.load_model(_A )
SCREAMING_SNAKE_CASE_ : str = model(_A )
if self.is_encoder_decoder:
SCREAMING_SNAKE_CASE_ : Optional[Any] = outputs["encoder_hidden_states"]
SCREAMING_SNAKE_CASE_ : str = outputs["encoder_attentions"]
else:
SCREAMING_SNAKE_CASE_ : Any = outputs["hidden_states"]
SCREAMING_SNAKE_CASE_ : List[str] = outputs["attentions"]
self.assertEqual(len(_A ),_A )
SCREAMING_SNAKE_CASE_ : Any = getattr(
self.model_tester,"expected_num_hidden_layers",self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_A ),_A )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ),[self.model_tester.seq_length, self.model_tester.hidden_size],)
self.assertEqual(len(_A ),self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],)
@slow
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
self.assertIsNotNone(_A )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : List[str] = True
SCREAMING_SNAKE_CASE_ : List[str] = getattr(self.model_tester,"decoder_seq_length",self.model_tester.seq_length )
SCREAMING_SNAKE_CASE_ : Any = getattr(self.model_tester,"encoder_seq_length",self.model_tester.seq_length )
SCREAMING_SNAKE_CASE_ : Optional[int] = getattr(self.model_tester,"key_length",_A )
SCREAMING_SNAKE_CASE_ : int = getattr(self.model_tester,"key_length",_A )
def check_decoder_attentions_output(_A : Dict ):
SCREAMING_SNAKE_CASE_ : int = len(_A )
self.assertEqual(out_len % 2,0 )
SCREAMING_SNAKE_CASE_ : Tuple = outputs.decoder_attentions
self.assertEqual(len(_A ),self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],)
def check_encoder_attentions_output(_A : Tuple ):
SCREAMING_SNAKE_CASE_ : int = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(_A ),self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],)
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : Optional[Any] = True
SCREAMING_SNAKE_CASE_ : Optional[Any] = False
SCREAMING_SNAKE_CASE_ : Tuple = model_class(_A )
SCREAMING_SNAKE_CASE_ : Any = model(self._prepare_for_class(_A,_A ) )
SCREAMING_SNAKE_CASE_ : Tuple = len(_A )
self.assertEqual(config.output_hidden_states,_A )
check_encoder_attentions_output(_A )
if self.is_encoder_decoder:
SCREAMING_SNAKE_CASE_ : Optional[Any] = model_class(_A )
SCREAMING_SNAKE_CASE_ : int = model(self._prepare_for_class(_A,_A ) )
self.assertEqual(config.output_hidden_states,_A )
check_decoder_attentions_output(_A )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE_ : str = True
SCREAMING_SNAKE_CASE_ : int = model_class(_A )
SCREAMING_SNAKE_CASE_ : List[str] = model(self._prepare_for_class(_A,_A ) )
self.assertEqual(config.output_hidden_states,_A )
check_encoder_attentions_output(_A )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE_ : str = True
SCREAMING_SNAKE_CASE_ : int = True
SCREAMING_SNAKE_CASE_ : Dict = model_class(_A )
SCREAMING_SNAKE_CASE_ : str = model(self._prepare_for_class(_A,_A ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1),len(_A ) )
self.assertEqual(model.config.output_hidden_states,_A )
check_encoder_attentions_output(_A )
@require_tf
class a__ ( unittest.TestCase ):
@slow
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
SCREAMING_SNAKE_CASE_ : int = tf.constant([[0, 1, 2, 3, 4, 5]] )
SCREAMING_SNAKE_CASE_ : Tuple = model(_A )[0]
SCREAMING_SNAKE_CASE_ : List[Any] = [1, 6, 768]
self.assertEqual(output.shape,_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.constant(
[
[
[-0.03475493, -0.4686034, -0.30638832],
[0.22637248, -0.26988646, -0.7423424],
[0.10324868, -0.45013508, -0.58280784],
]
] )
tf.debugging.assert_near(output[:, :3, :3],_A,atol=1E-4 )
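# --- Illustrative note (added; relies on ConvBert's documented head_ratio default) ---
# The attention-shape assertions in the tests above divide
# num_attention_heads by 2 because ConvBert's head_ratio (2 by default)
# hands half of the heads to the span-based dynamic convolution, leaving
# num_attention_heads / head_ratio ordinary attention maps:
num_attention_heads, head_ratio = 4, 2
assert num_attention_heads // head_ratio == 2  # matches the "/ 2" above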
| 18 | 1 |
def merge_sort ( collection : list ):
    """simple docstring"""
    def merge(left : list , right : list ) -> list:
        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0 )
            yield from left
            yield from right
        return list(_merge() )
    if len(collection ) <= 1:
        return collection
    mid = len(collection ) // 2
    return merge(merge_sort(collection[:mid] ) , merge_sort(collection[mid:] ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
print(*merge_sort(unsorted), sep=''',''')
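# --- Illustrative usage (added) ---
# merge_sort returns a sorted list and is stable: on ties the generator above
# pops from the left half first.
assert merge_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]
assert merge_sort([]) == []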
| 18 | def binary_recursive ( decimal : int ):
    """simple docstring"""
    decimal = int(decimal )
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal )
    div , mod = divmod(decimal , 2 )
    return binary_recursive(div ) + str(mod )
def _snake_case ( number : str ):
    """simple docstring"""
    number = str(number ).strip()
    if not number:
        raise ValueError("No input value was provided" )
    negative = "-" if number.startswith("-" ) else ""
    number = number.lstrip("-" )
    if not number.isnumeric():
        raise ValueError("Input value is not an integer" )
    return f'{negative}0b{binary_recursive(int(number ) )}'
if __name__ == "__main__":
from doctest import testmod
testmod()
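# --- Illustrative usage (added) ---
# binary_recursive emits the most significant bit first via divmod; the
# wrapper above (_snake_case) adds the sign and the "0b" prefix:
assert binary_recursive(10) == "1010"
assert _snake_case("-5") == "-0b101"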
| 18 | 1 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class a__ :
def __init__( self : Union[str, Any],_A : str,_A : int=2,_A : Tuple=True,_A : str=False,_A : Union[str, Any]=10,_A : Optional[int]=3,_A : Union[str, Any]=32 * 4,_A : List[str]=32 * 6,_A : Any=4,_A : Optional[Any]=32,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = parent
SCREAMING_SNAKE_CASE_ : int = batch_size
SCREAMING_SNAKE_CASE_ : List[Any] = is_training
SCREAMING_SNAKE_CASE_ : Tuple = use_auxiliary_loss
SCREAMING_SNAKE_CASE_ : Optional[int] = num_queries
SCREAMING_SNAKE_CASE_ : int = num_channels
SCREAMING_SNAKE_CASE_ : Any = min_size
SCREAMING_SNAKE_CASE_ : Optional[int] = max_size
SCREAMING_SNAKE_CASE_ : str = num_labels
SCREAMING_SNAKE_CASE_ : Any = mask_feature_size
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.ones([self.batch_size, self.min_size, self.max_size],device=_A )
SCREAMING_SNAKE_CASE_ : Tuple = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size],device=_A ) > 0.5
).float()
SCREAMING_SNAKE_CASE_ : List[str] = (torch.rand((self.batch_size, self.num_labels),device=_A ) > 0.5).long()
SCREAMING_SNAKE_CASE_ : str = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1],),decoder_config=DetrConfig(
decoder_ffn_dim=128,num_queries=self.num_queries,decoder_attention_heads=2,d_model=self.mask_feature_size,),mask_feature_size=self.mask_feature_size,fpn_feature_size=self.mask_feature_size,num_channels=self.num_channels,num_labels=self.num_labels,)
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ : Any = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
def __UpperCamelCase ( self : Tuple,_A : Dict,_A : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = output.encoder_hidden_states
SCREAMING_SNAKE_CASE_ : str = output.pixel_decoder_hidden_states
SCREAMING_SNAKE_CASE_ : int = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(_A ),len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_A ),len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_A ),config.decoder_config.decoder_layers )
def __UpperCamelCase ( self : Tuple,_A : Any,_A : Optional[Any],_A : Tuple,_A : Tuple=False ):
"""simple docstring"""
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : Dict = MaskFormerModel(config=_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : int = model(pixel_values=_A,pixel_mask=_A )
SCREAMING_SNAKE_CASE_ : List[str] = model(_A,output_hidden_states=_A )
# the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape,(self.batch_size, self.num_queries, self.mask_feature_size),)
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_A,_A )
def __UpperCamelCase ( self : int,_A : Optional[int],_A : Optional[int],_A : List[Any],_A : Any,_A : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = MaskFormerForInstanceSegmentation(config=_A )
model.to(_A )
model.eval()
def comm_check_on_output(_A : Optional[int] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape,(self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),)
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape,(self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : List[str] = model(pixel_values=_A,pixel_mask=_A )
SCREAMING_SNAKE_CASE_ : str = model(_A )
comm_check_on_output(_A )
SCREAMING_SNAKE_CASE_ : Dict = model(
pixel_values=_A,pixel_mask=_A,mask_labels=_A,class_labels=_A )
comm_check_on_output(_A )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape,torch.Size([1] ) )
@require_torch
class a__ ( A__ , A__ , unittest.TestCase ):
A = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
A = (
{'feature-extraction': MaskFormerModel, 'image-segmentation': MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
A = False
A = False
A = False
A = False
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = MaskFormerModelTester(self )
SCREAMING_SNAKE_CASE_ : Dict = ConfigTester(self,config_class=_A,has_text_modality=_A )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(_A,**_A,output_hidden_states=_A )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*_A )
@unittest.skip(reason="MaskFormer does not use inputs_embeds" )
def __UpperCamelCase ( self : str ):
"""simple docstring"""
pass
@unittest.skip(reason="MaskFormer does not have a get_input_embeddings method" )
def __UpperCamelCase ( self : int ):
"""simple docstring"""
pass
@unittest.skip(reason="MaskFormer is not a generative model" )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
pass
@unittest.skip(reason="MaskFormer does not use token embeddings" )
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(
reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __UpperCamelCase ( self : int ):
"""simple docstring"""
pass
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : Tuple = model_class(_A )
SCREAMING_SNAKE_CASE_ : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_ : int = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_ : Any = ["pixel_values"]
self.assertListEqual(arg_names[:1],_A )
@slow
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
for model_name in ["facebook/maskformer-swin-small-coco"]:
SCREAMING_SNAKE_CASE_ : Optional[Any] = MaskFormerModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = (self.model_tester.min_size,) * 2
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {
"pixel_values": torch.randn((2, 3, *size),device=_A ),
"mask_labels": torch.randn((2, 10, *size),device=_A ),
"class_labels": torch.zeros(2,10,device=_A ).long(),
}
SCREAMING_SNAKE_CASE_ : Dict = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(_A )
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(**_A )
self.assertTrue(outputs.loss is not None )
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(_A,**_A,output_hidden_states=_A )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : int = model_class(_A ).to(_A )
SCREAMING_SNAKE_CASE_ : Dict = model(**_A,output_attentions=_A )
self.assertTrue(outputs.attentions is not None )
def __UpperCamelCase ( self : int ):
"""simple docstring"""
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
SCREAMING_SNAKE_CASE_ : int = self.all_model_classes[1]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ : Dict = model_class(_A )
model.to(_A )
model.train()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(_A,mask_labels=_A,class_labels=_A ).loss
loss.backward()
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.all_model_classes[1]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ : Dict = True
SCREAMING_SNAKE_CASE_ : Tuple = True
SCREAMING_SNAKE_CASE_ : Dict = model_class(_A )
model.to(_A )
model.train()
SCREAMING_SNAKE_CASE_ : Tuple = model(_A,mask_labels=_A,class_labels=_A )
SCREAMING_SNAKE_CASE_ : List[Any] = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
SCREAMING_SNAKE_CASE_ : Any = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
        # we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
SCREAMING_SNAKE_CASE_ : List[Any] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
SCREAMING_SNAKE_CASE_ : Optional[int] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_A )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
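# Absolute tolerance used by the slow integration tests below when comparing outputs against reference values.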
__lowerCamelCase : Dict = 1E-4
def _snake_case ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_vision
@slow
class a__ ( unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self : str ):
"""simple docstring"""
return (
MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco" )
if is_vision_available()
else None
)
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco" ).to(_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.default_image_processor
SCREAMING_SNAKE_CASE_ : Optional[int] = prepare_img()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = image_processor(_A,return_tensors="pt" ).to(_A )
SCREAMING_SNAKE_CASE_ : str = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_A,(1, 3, 800, 1088) )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : Optional[int] = model(**_A )
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor(
[[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]] ).to(_A )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3],_A,atol=_A ) )
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.tensor(
[[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]] ).to(_A )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3],_A,atol=_A ) )
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor(
[[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]] ).to(_A )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3],_A,atol=_A ) )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
.to(_A )
.eval()
)
SCREAMING_SNAKE_CASE_ : Any = self.default_image_processor
SCREAMING_SNAKE_CASE_ : Dict = prepare_img()
SCREAMING_SNAKE_CASE_ : str = image_processor(_A,return_tensors="pt" ).to(_A )
SCREAMING_SNAKE_CASE_ : Optional[Any] = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_A,(1, 3, 800, 1088) )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : Tuple = model(**_A )
# masks_queries_logits
SCREAMING_SNAKE_CASE_ : Optional[int] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape,(1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),)
SCREAMING_SNAKE_CASE_ : Optional[int] = [
[-1.3737124, -1.7724937, -1.9364233],
[-1.5977281, -1.9867939, -2.1523695],
[-1.5795398, -1.9269832, -2.093942],
]
SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor(_A ).to(_A )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3],_A,atol=_A ) )
# class_queries_logits
SCREAMING_SNAKE_CASE_ : Union[str, Any] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape,(1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.tensor(
[
[1.65_12E00, -5.25_72E00, -3.35_19E00],
[3.61_69E-02, -5.90_25E00, -2.93_13E00],
[1.07_66E-04, -7.76_30E00, -5.12_63E00],
] ).to(_A )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3],_A,atol=_A ) )
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff" )
.to(_A )
.eval()
)
SCREAMING_SNAKE_CASE_ : Any = self.default_image_processor
SCREAMING_SNAKE_CASE_ : Tuple = prepare_img()
SCREAMING_SNAKE_CASE_ : Dict = image_processor(_A,return_tensors="pt" ).to(_A )
SCREAMING_SNAKE_CASE_ : List[str] = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_A,(1, 3, 800, 1088) )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : List[Any] = model(**_A )
# masks_queries_logits
SCREAMING_SNAKE_CASE_ : Dict = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape,(1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),)
SCREAMING_SNAKE_CASE_ : Optional[int] = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
SCREAMING_SNAKE_CASE_ : str = torch.tensor(_A ).to(_A )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3],_A,atol=_A ) )
# class_queries_logits
SCREAMING_SNAKE_CASE_ : Dict = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape,(1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor(
[[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]] ).to(_A )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3],_A,atol=_A ) )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
.to(_A )
.eval()
)
SCREAMING_SNAKE_CASE_ : Any = self.default_image_processor
SCREAMING_SNAKE_CASE_ : Union[str, Any] = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )],segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )],return_tensors="pt",)
SCREAMING_SNAKE_CASE_ : Optional[Any] = inputs["pixel_values"].to(_A )
SCREAMING_SNAKE_CASE_ : Optional[Any] = [el.to(_A ) for el in inputs["mask_labels"]]
SCREAMING_SNAKE_CASE_ : int = [el.to(_A ) for el in inputs["class_labels"]]
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(**_A )
self.assertTrue(outputs.loss is not None )
| 18 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
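# Lazy import structure: the heavy vision/torch submodules are only imported on first attribute access,
# via the _LazyModule assignment at the bottom of the file.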
__lowerCamelCase : Union[str, Any] = {
'''configuration_chinese_clip''': [
'''CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ChineseCLIPConfig''',
'''ChineseCLIPOnnxConfig''',
'''ChineseCLIPTextConfig''',
'''ChineseCLIPVisionConfig''',
],
'''processing_chinese_clip''': ['''ChineseCLIPProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Any = ['''ChineseCLIPFeatureExtractor''']
__lowerCamelCase : Optional[int] = ['''ChineseCLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : int = [
'''CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ChineseCLIPModel''',
'''ChineseCLIPPreTrainedModel''',
'''ChineseCLIPTextModel''',
'''ChineseCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
__lowerCamelCase : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 18 | 1 |
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class a__ :
def __init__( self : Optional[int],_A : Tuple,_A : List[Any]=13,_A : Tuple=7,_A : int=True,_A : List[str]=True,_A : Optional[int]=True,_A : Tuple=True,_A : str=99,_A : Optional[Any]=[1, 1, 2],_A : List[str]=1,_A : Tuple=32,_A : Any=4,_A : Optional[Any]=8,_A : Optional[Any]=37,_A : Any="gelu_new",_A : Tuple=0.1,_A : int=0.1,_A : Optional[int]=0.0,_A : Optional[int]=512,_A : Union[str, Any]=3,_A : Optional[Any]=0.02,_A : Optional[Any]=3,_A : List[Any]=4,_A : str=None,_A : str=False,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = parent
SCREAMING_SNAKE_CASE_ : List[str] = batch_size
SCREAMING_SNAKE_CASE_ : Any = seq_length
SCREAMING_SNAKE_CASE_ : Optional[Any] = is_training
SCREAMING_SNAKE_CASE_ : str = use_input_mask
SCREAMING_SNAKE_CASE_ : int = use_token_type_ids
SCREAMING_SNAKE_CASE_ : List[Any] = use_labels
SCREAMING_SNAKE_CASE_ : List[Any] = vocab_size
SCREAMING_SNAKE_CASE_ : Any = block_sizes
SCREAMING_SNAKE_CASE_ : Optional[Any] = num_decoder_layers
SCREAMING_SNAKE_CASE_ : Any = d_model
SCREAMING_SNAKE_CASE_ : List[Any] = n_head
SCREAMING_SNAKE_CASE_ : Tuple = d_head
SCREAMING_SNAKE_CASE_ : int = d_inner
SCREAMING_SNAKE_CASE_ : List[Any] = hidden_act
SCREAMING_SNAKE_CASE_ : Union[str, Any] = hidden_dropout
SCREAMING_SNAKE_CASE_ : Union[str, Any] = attention_dropout
SCREAMING_SNAKE_CASE_ : List[Any] = activation_dropout
SCREAMING_SNAKE_CASE_ : Tuple = max_position_embeddings
SCREAMING_SNAKE_CASE_ : str = type_vocab_size
SCREAMING_SNAKE_CASE_ : List[Any] = 2
SCREAMING_SNAKE_CASE_ : Optional[int] = num_labels
SCREAMING_SNAKE_CASE_ : Tuple = num_choices
SCREAMING_SNAKE_CASE_ : Tuple = scope
SCREAMING_SNAKE_CASE_ : Optional[Any] = initializer_std
# Used in the tests to check the size of the first attention layer
SCREAMING_SNAKE_CASE_ : Tuple = n_head
# Used in the tests to check the size of the first hidden state
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.d_model
# Used in the tests to check the number of output hidden states/attentions
SCREAMING_SNAKE_CASE_ : Tuple = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
SCREAMING_SNAKE_CASE_ : str = self.num_hidden_layers + 2
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
SCREAMING_SNAKE_CASE_ : Optional[int] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ : List[str] = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE_ : str = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
SCREAMING_SNAKE_CASE_ : str = None
SCREAMING_SNAKE_CASE_ : Optional[Any] = None
SCREAMING_SNAKE_CASE_ : Optional[int] = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size],self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length],self.num_labels )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([self.batch_size],self.num_choices )
SCREAMING_SNAKE_CASE_ : Optional[Any] = FunnelConfig(
vocab_size=self.vocab_size,block_sizes=self.block_sizes,num_decoder_layers=self.num_decoder_layers,d_model=self.d_model,n_head=self.n_head,d_head=self.d_head,d_inner=self.d_inner,hidden_act=self.hidden_act,hidden_dropout=self.hidden_dropout,attention_dropout=self.attention_dropout,activation_dropout=self.activation_dropout,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,initializer_std=self.initializer_std,)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def __UpperCamelCase ( self : Any,_A : Optional[int],_A : int,_A : Tuple,_A : List[str],_A : Optional[Any],_A : Any,_A : Optional[int],):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = TFFunnelModel(config=_A )
SCREAMING_SNAKE_CASE_ : List[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [input_ids, input_mask]
SCREAMING_SNAKE_CASE_ : str = model(_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.d_model) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
SCREAMING_SNAKE_CASE_ : Tuple = TFFunnelModel(config=_A )
SCREAMING_SNAKE_CASE_ : int = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.d_model) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
SCREAMING_SNAKE_CASE_ : Any = TFFunnelModel(config=_A )
SCREAMING_SNAKE_CASE_ : Any = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.d_model) )
def __UpperCamelCase ( self : Tuple,_A : Union[str, Any],_A : Any,_A : Optional[Any],_A : List[Any],_A : List[str],_A : Optional[int],_A : int,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = TFFunnelBaseModel(config=_A )
SCREAMING_SNAKE_CASE_ : Tuple = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
SCREAMING_SNAKE_CASE_ : str = model(_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [input_ids, input_mask]
SCREAMING_SNAKE_CASE_ : Dict = model(_A )
SCREAMING_SNAKE_CASE_ : Dict = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, 2, self.d_model) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
SCREAMING_SNAKE_CASE_ : Optional[int] = TFFunnelBaseModel(config=_A )
SCREAMING_SNAKE_CASE_ : Dict = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, 3, self.d_model) )
SCREAMING_SNAKE_CASE_ : List[str] = False
SCREAMING_SNAKE_CASE_ : Any = TFFunnelBaseModel(config=_A )
SCREAMING_SNAKE_CASE_ : str = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, 2, self.d_model) )
def __UpperCamelCase ( self : Dict,_A : Tuple,_A : Dict,_A : List[Any],_A : str,_A : int,_A : Optional[int],_A : Optional[Any],):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = TFFunnelForPreTraining(config=_A )
SCREAMING_SNAKE_CASE_ : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length) )
def __UpperCamelCase ( self : Any,_A : List[Any],_A : Tuple,_A : Tuple,_A : Dict,_A : Any,_A : Optional[int],_A : Union[str, Any],):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = TFFunnelForMaskedLM(config=_A )
SCREAMING_SNAKE_CASE_ : List[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
SCREAMING_SNAKE_CASE_ : str = model(_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self : Union[str, Any],_A : List[Any],_A : Optional[int],_A : Optional[int],_A : List[str],_A : Optional[int],_A : Optional[int],_A : List[Any],):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = self.num_labels
SCREAMING_SNAKE_CASE_ : int = TFFunnelForSequenceClassification(config=_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : int,_A : List[Any],_A : Dict,_A : Dict,_A : List[str],_A : Tuple,_A : str,_A : str,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = self.num_choices
SCREAMING_SNAKE_CASE_ : int = TFFunnelForMultipleChoice(config=_A )
SCREAMING_SNAKE_CASE_ : str = tf.tile(tf.expand_dims(_A,1 ),(1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE_ : Dict = tf.tile(tf.expand_dims(_A,1 ),(1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE_ : Dict = tf.tile(tf.expand_dims(_A,1 ),(1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE_ : List[str] = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_choices) )
def __UpperCamelCase ( self : Optional[Any],_A : str,_A : str,_A : List[str],_A : Union[str, Any],_A : Dict,_A : List[Any],_A : int,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.num_labels
SCREAMING_SNAKE_CASE_ : List[str] = TFFunnelForTokenClassification(config=_A )
SCREAMING_SNAKE_CASE_ : str = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
SCREAMING_SNAKE_CASE_ : str = model(_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase ( self : Optional[Any],_A : Dict,_A : List[str],_A : str,_A : List[str],_A : List[str],_A : str,_A : Dict,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = TFFunnelForQuestionAnswering(config=_A )
SCREAMING_SNAKE_CASE_ : int = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
SCREAMING_SNAKE_CASE_ : List[str] = model(_A )
self.parent.assertEqual(result.start_logits.shape,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape,(self.batch_size, self.seq_length) )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) ,
) : Tuple = config_and_inputs
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class a__ ( A__ , A__ , unittest.TestCase ):
A = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
A = (
{
'feature-extraction': (TFFunnelBaseModel, TFFunnelModel),
'fill-mask': TFFunnelForMaskedLM,
'question-answering': TFFunnelForQuestionAnswering,
'text-classification': TFFunnelForSequenceClassification,
'token-classification': TFFunnelForTokenClassification,
'zero-shot': TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
A = False
A = False
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = TFFunnelModelTester(self )
SCREAMING_SNAKE_CASE_ : int = ConfigTester(self,config_class=_A )
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_A )
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_A )
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_A )
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_A )
@require_tf
class a__ ( A__ , unittest.TestCase ):
A = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
A = False
A = False
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = TFFunnelModelTester(self,base=_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ConfigTester(self,config_class=_A )
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*_A )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_A )
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_A )
| 18 | import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
__lowerCamelCase : Any = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def _snake_case ( lowerCAmelCase : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = list(s_dict.keys() )
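    # 1. Convert the flax "layers_<n>" naming to the "block/<n>/layer" layout used by the HF checkpoint.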
for key in keys:
SCREAMING_SNAKE_CASE_ : int = R".*/layers_(\d+)"
SCREAMING_SNAKE_CASE_ : List[Any] = key
if re.match(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Any = re.sub(R"layers_(\d+)" , R"block/\1/layer" , lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[Any] = R"(encoder|decoder)\/"
if re.match(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : str = re.match(lowerCAmelCase , lowerCAmelCase ).groups()
if groups[0] == "encoder":
SCREAMING_SNAKE_CASE_ : Any = re.sub(R"/mlp/" , R"/1/mlp/" , lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[Any] = re.sub(R"/pre_mlp_layer_norm/" , R"/1/layer_norm/" , lowerCAmelCase )
elif groups[0] == "decoder":
SCREAMING_SNAKE_CASE_ : List[str] = re.sub(R"/mlp/" , R"/2/mlp/" , lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Dict = re.sub(R"/pre_mlp_layer_norm/" , R"/2/layer_norm/" , lowerCAmelCase )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
SCREAMING_SNAKE_CASE_ : List[Any] = new_key.replace(lowerCAmelCase , lowerCAmelCase )
print(f'{key} -> {new_key}' )
SCREAMING_SNAKE_CASE_ : List[Any] = s_dict.pop(lowerCAmelCase )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
SCREAMING_SNAKE_CASE_ : str = s_dict[
"encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
SCREAMING_SNAKE_CASE_ : Optional[int] = s_dict[
"decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = s_dict[key].shape[0]
SCREAMING_SNAKE_CASE_ : List[Any] = s_dict[key]
for idx in range(lowerCAmelCase ):
                SCREAMING_SNAKE_CASE_ : Tuple = expert_weights[idx]
                print(f'{key} -> {key.replace("expert/" , f"experts/expert_{idx}/" )}' )
s_dict.pop(lowerCAmelCase )
return s_dict
__lowerCamelCase : List[Any] = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def _snake_case ( lowerCAmelCase : Dict , lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
import regex as re
with open(lowerCAmelCase , "r" ) as f:
SCREAMING_SNAKE_CASE_ : Optional[Any] = f.read()
SCREAMING_SNAKE_CASE_ : List[str] = re.findall(R"(.*) = ([0-9.]*)" , lowerCAmelCase )
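    # Keep only the "param = value" pairs that map onto SwitchTransformersConfig fields (see GIN_TO_CONFIG_MAPPING).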
SCREAMING_SNAKE_CASE_ : Any = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
SCREAMING_SNAKE_CASE_ : int = float(lowerCAmelCase ) if "." in value else int(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = re.findall(R"(.*activations) = \(\'(.*)\',\)" , lowerCAmelCase )[0]
SCREAMING_SNAKE_CASE_ : List[str] = str(activation[1] )
SCREAMING_SNAKE_CASE_ : str = num_experts
SCREAMING_SNAKE_CASE_ : Tuple = SwitchTransformersConfig(**lowerCAmelCase )
return config
def _snake_case ( lowerCAmelCase : str , lowerCAmelCase : str , lowerCAmelCase : str=None , lowerCAmelCase : Optional[Any]="./" , lowerCAmelCase : Dict=8 ):
"""simple docstring"""
print(f'Loading flax weights from : {flax_checkpoint_path}' )
SCREAMING_SNAKE_CASE_ : int = checkpoints.load_tax_checkpoint(lowerCAmelCase )
if gin_file is not None:
SCREAMING_SNAKE_CASE_ : int = convert_gin_to_config(lowerCAmelCase , lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE_ : Dict = SwitchTransformersConfig.from_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : str = SwitchTransformersForConditionalGeneration(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = flax_params["target"]
SCREAMING_SNAKE_CASE_ : List[str] = flatten_dict(lowerCAmelCase , sep="/" )
SCREAMING_SNAKE_CASE_ : List[str] = rename_keys(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = unflatten_dict(lowerCAmelCase , sep="/" )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(lowerCAmelCase , lowerCAmelCase )
print(f'Save PyTorch model to {pytorch_dump_path}' )
pt_model.save_pretrained(lowerCAmelCase )
if __name__ == "__main__":
__lowerCamelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
            '''Path to the T5X checkpoint of the pre-trained SwitchTransformers model to convert. \nIf no'''
            ''' `config_name` is provided, a `gin_file` has to be provided.'''
),
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
__lowerCamelCase : Any = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 18 | 1 |
import random
class a__ :
@staticmethod
def __UpperCamelCase ( _A : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = [ord(_A ) for i in text]
SCREAMING_SNAKE_CASE_ : Optional[Any] = []
SCREAMING_SNAKE_CASE_ : int = []
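        # For every character code p, draw a fresh random key k and encrypt it as c = (p + k) * k.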
for i in plain:
SCREAMING_SNAKE_CASE_ : Any = random.randint(1,300 )
SCREAMING_SNAKE_CASE_ : int = (i + k) * k
cipher.append(_A )
key.append(_A )
return cipher, key
@staticmethod
def __UpperCamelCase ( _A : list[int],_A : list[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = []
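        # Invert c = (p + k) * k by computing p = (c - k**2) / k for each position.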
for i in range(len(_A ) ):
SCREAMING_SNAKE_CASE_ : Tuple = int((cipher[i] - (key[i]) ** 2) / key[i] )
plain.append(chr(_A ) )
return "".join(_A )
if __name__ == "__main__":
__lowerCamelCase , __lowerCamelCase : List[Any] = Onepad().encrypt('''Hello''')
print(c, k)
print(Onepad().decrypt(c, k))
| 18 | from math import factorial, radians
def _snake_case ( lowerCAmelCase : float , lowerCAmelCase : int = 1_8 , lowerCAmelCase : int = 1_0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
# Converting from degrees to radians
SCREAMING_SNAKE_CASE_ : Tuple = radians(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] = angle_in_radians
SCREAMING_SNAKE_CASE_ : List[str] = 3
SCREAMING_SNAKE_CASE_ : str = -1
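    # Maclaurin series: sin(x) = x - x**3/3! + x**5/5! - ...; `a` tracks the exponent and `b` the alternating sign.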
for _ in range(lowerCAmelCase ):
result += (b * (angle_in_radians**a)) / factorial(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Dict = -b # One positive term and the next will be negative and so on...
a += 2 # Increased by 2 for every term.
return round(lowerCAmelCase , lowerCAmelCase )
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 18 | 1 |
from functools import lru_cache
@lru_cache
def _snake_case ( lowerCAmelCase : int ):
"""simple docstring"""
if num < 0:
raise ValueError("Number should not be negative." )
return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 18 | from functools import lru_cache
@lru_cache
def _snake_case ( lowerCAmelCase : int ):
"""simple docstring"""
if num < 0:
raise ValueError("Number should not be negative." )
return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 18 | 1 |
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a__ ( A__ , unittest.TestCase ):
A = LEDTokenizer
A = LEDTokenizerFast
A = True
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
super().setUp()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
SCREAMING_SNAKE_CASE_ : Optional[Any] = dict(zip(_A,range(len(_A ) ) ) )
SCREAMING_SNAKE_CASE_ : Dict = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
SCREAMING_SNAKE_CASE_ : Tuple = {"unk_token": "<unk>"}
SCREAMING_SNAKE_CASE_ : List[str] = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES["vocab_file"] )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file,"w",encoding="utf-8" ) as fp:
fp.write(json.dumps(_A ) + "\n" )
with open(self.merges_file,"w",encoding="utf-8" ) as fp:
fp.write("\n".join(_A ) )
def __UpperCamelCase ( self : Tuple,**_A : List[str] ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname,**_A )
def __UpperCamelCase ( self : str,**_A : Union[str, Any] ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname,**_A )
def __UpperCamelCase ( self : Tuple,_A : Any ):
"""simple docstring"""
return "lower newer", "lower newer"
@cached_property
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
return LEDTokenizer.from_pretrained("allenai/led-base-16384" )
@cached_property
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
return LEDTokenizerFast.from_pretrained("allenai/led-base-16384" )
@require_torch
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = ["A long paragraph for summarization.", "Another paragraph for summarization."]
SCREAMING_SNAKE_CASE_ : List[Any] = [0, 250, 251, 1_7818, 13, 3_9186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer(_A,max_length=len(_A ),padding=_A,return_tensors="pt" )
self.assertIsInstance(_A,_A )
self.assertEqual((2, 9),batch.input_ids.shape )
self.assertEqual((2, 9),batch.attention_mask.shape )
SCREAMING_SNAKE_CASE_ : List[str] = batch.input_ids.tolist()[0]
self.assertListEqual(_A,_A )
@require_torch
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = ["A long paragraph for summarization.", "Another paragraph for summarization."]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE_ : int = tokenizer(_A,padding=_A,return_tensors="pt" )
self.assertIn("input_ids",_A )
self.assertIn("attention_mask",_A )
self.assertNotIn("labels",_A )
self.assertNotIn("decoder_attention_mask",_A )
@require_torch
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = [
"Summary of the text.",
"Another summary.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer(text_target=_A,max_length=32,padding="max_length",return_tensors="pt" )
self.assertEqual(32,targets["input_ids"].shape[1] )
@require_torch
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE_ : List[str] = tokenizer(
["I am a small frog" * 1024, "I am a small frog"],padding=_A,truncation=_A,return_tensors="pt" )
self.assertIsInstance(_A,_A )
self.assertEqual(batch.input_ids.shape,(2, 5122) )
@require_torch
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = ["A long paragraph for summarization."]
SCREAMING_SNAKE_CASE_ : str = [
"Summary of the text.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer(_A,return_tensors="pt" )
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer(text_target=_A,return_tensors="pt" )
SCREAMING_SNAKE_CASE_ : Optional[int] = inputs["input_ids"]
SCREAMING_SNAKE_CASE_ : Any = targets["input_ids"]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE_ : str = ["Summary of the text.", "Another summary."]
SCREAMING_SNAKE_CASE_ : Optional[int] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
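            # `tokenizer.pad` is expected to right-pad each global_attention_mask entry with -1 up to the longest sequence.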
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer(_A,padding=_A )
            SCREAMING_SNAKE_CASE_ : List[Any] = [[0] * len(x) for x in encoded_output["input_ids"]]
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer.pad(_A )
self.assertSequenceEqual(outputs["global_attention_mask"],_A )
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
pass
def __UpperCamelCase ( self : str ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
SCREAMING_SNAKE_CASE_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(_A,**_A )
SCREAMING_SNAKE_CASE_ : Dict = self.tokenizer_class.from_pretrained(_A,**_A )
SCREAMING_SNAKE_CASE_ : Any = "A, <mask> AllenNLP sentence."
SCREAMING_SNAKE_CASE_ : Tuple = tokenizer_r.encode_plus(_A,add_special_tokens=_A,return_token_type_ids=_A )
SCREAMING_SNAKE_CASE_ : Any = tokenizer_p.encode_plus(_A,add_special_tokens=_A,return_token_type_ids=_A )
self.assertEqual(sum(tokens_r["token_type_ids"] ),sum(tokens_p["token_type_ids"] ) )
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ),sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ),)
SCREAMING_SNAKE_CASE_ : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
SCREAMING_SNAKE_CASE_ : str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
self.assertSequenceEqual(tokens_p["input_ids"],[0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"],[0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
_A,["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
_A,["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
| 18 | from collections import defaultdict
def _snake_case ( lowerCAmelCase : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = 1
SCREAMING_SNAKE_CASE_ : Tuple = True
for v in tree[start]:
if v not in visited:
ret += dfs(lowerCAmelCase )
if ret % 2 == 0:
cuts.append(lowerCAmelCase )
return ret
def _snake_case ( ):
"""simple docstring"""
dfs(1 )
if __name__ == "__main__":
__lowerCamelCase , __lowerCamelCase : Union[str, Any] = 10, 9
__lowerCamelCase : Optional[int] = defaultdict(list)
__lowerCamelCase : dict[int, bool] = {}
__lowerCamelCase : list[int] = []
__lowerCamelCase : Optional[Any] = 0
__lowerCamelCase : Any = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
| 18 | 1 |
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCamelCase : Any = get_tests_dir('''fixtures/test_sentencepiece_no_bos.model''')
@require_sentencepiece
@require_tokenizers
class a__ ( A__ , unittest.TestCase ):
A = PegasusTokenizer
A = PegasusTokenizerFast
A = True
A = True
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE_ : Any = PegasusTokenizer(_A )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
return PegasusTokenizer.from_pretrained("google/pegasus-large" )
def __UpperCamelCase ( self : Union[str, Any],**_A : Optional[int] ):
"""simple docstring"""
return PegasusTokenizer.from_pretrained(self.tmpdirname,**_A )
def __UpperCamelCase ( self : List[Any],_A : Union[str, Any] ):
"""simple docstring"""
return ("This is a test", "This is a test")
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = "</s>"
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ),_A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ),_A )
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0],"<pad>" )
self.assertEqual(vocab_keys[1],"</s>" )
self.assertEqual(vocab_keys[-1],"v" )
self.assertEqual(len(_A ),1103 )
def __UpperCamelCase ( self : int ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size,1103 )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE_ : Optional[int] = (
"Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
" </s> <pad> <pad> <pad>"
)
SCREAMING_SNAKE_CASE_ : List[str] = rust_tokenizer([raw_input_str],return_tensors=_A,add_special_tokens=_A ).input_ids[0]
SCREAMING_SNAKE_CASE_ : Tuple = py_tokenizer([raw_input_str],return_tensors=_A,add_special_tokens=_A ).input_ids[0]
self.assertListEqual(_A,_A )
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
SCREAMING_SNAKE_CASE_ : Union[str, Any] = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
SCREAMING_SNAKE_CASE_ : List[str] = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
SCREAMING_SNAKE_CASE_ : Dict = tokenizer([raw_input_str],return_tensors=_A ).input_ids[0]
self.assertListEqual(_A,_A )
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
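        # ids 0-1 are pad/eos and ids 2-3 the two mask tokens; the remaining reserved slots fill up the
        # offset of 103, so SentencePiece's unk id (2) surfaces as 105.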
assert tokenizer.vocab_size == 9_6103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
SCREAMING_SNAKE_CASE_ : str = "To ensure a smooth flow of bank resolutions."
SCREAMING_SNAKE_CASE_ : Dict = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer([raw_input_str],return_tensors=_A ).input_ids[0]
self.assertListEqual(_A,_A )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = ["This is going to be way too long." * 150, "short example"]
SCREAMING_SNAKE_CASE_ : Any = ["not super long but more than 5 tokens", "tiny"]
SCREAMING_SNAKE_CASE_ : Tuple = self._large_tokenizer(_A,padding=_A,truncation=_A,return_tensors="pt" )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self._large_tokenizer(
text_target=_A,max_length=5,padding=_A,truncation=_A,return_tensors="pt" )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(_A ) == 2 # input_ids, attention_mask.
@slow
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = {"input_ids": [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_A,model_name="google/bigbird-pegasus-large-arxiv",revision="ba85d0851d708441f91440d509690f1ab6353415",)
@require_sentencepiece
@require_tokenizers
class a__ ( A__ , unittest.TestCase ):
A = PegasusTokenizer
A = PegasusTokenizerFast
A = True
A = True
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE_ : Optional[Any] = PegasusTokenizer(_A,offset=0,mask_token_sent=_A,mask_token="[MASK]" )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv" )
def __UpperCamelCase ( self : List[str],**_A : List[str] ):
"""simple docstring"""
return PegasusTokenizer.from_pretrained(self.tmpdirname,**_A )
def __UpperCamelCase ( self : List[str],_A : str ):
"""simple docstring"""
return ("This is a test", "This is a test")
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE_ : Dict = self.tokenizer_class.from_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE_ : List[Any] = (
"Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
" <pad> <pad> <pad>"
)
SCREAMING_SNAKE_CASE_ : Optional[Any] = rust_tokenizer([raw_input_str],return_tensors=_A,add_special_tokens=_A ).input_ids[0]
SCREAMING_SNAKE_CASE_ : Dict = py_tokenizer([raw_input_str],return_tensors=_A,add_special_tokens=_A ).input_ids[0]
self.assertListEqual(_A,_A )
@require_torch
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = ["This is going to be way too long." * 1000, "short example"]
SCREAMING_SNAKE_CASE_ : Dict = ["not super long but more than 5 tokens", "tiny"]
SCREAMING_SNAKE_CASE_ : int = self._large_tokenizer(_A,padding=_A,truncation=_A,return_tensors="pt" )
SCREAMING_SNAKE_CASE_ : List[Any] = self._large_tokenizer(
text_target=_A,max_length=5,padding=_A,truncation=_A,return_tensors="pt" )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(_A ) == 2 # input_ids, attention_mask.
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = (
"This is an example string that is used to test the original TF implementation against the HF"
" implementation"
)
SCREAMING_SNAKE_CASE_ : Any = self._large_tokenizer(_A ).input_ids
self.assertListEqual(
_A,[182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1],)
| 18 | # Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def _snake_case ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = ArgumentParser("Accelerate CLI tool" , usage="accelerate <command> [<args>]" , allow_abbrev=lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = parser.add_subparsers(help="accelerate command helpers" )
# Register commands
get_config_parser(subparsers=lowerCAmelCase )
env_command_parser(subparsers=lowerCAmelCase )
launch_command_parser(subparsers=lowerCAmelCase )
tpu_command_parser(subparsers=lowerCAmelCase )
test_command_parser(subparsers=lowerCAmelCase )
# Let's go
SCREAMING_SNAKE_CASE_ : Dict = parser.parse_args()
if not hasattr(lowerCAmelCase , "func" ):
parser.print_help()
exit(1 )
# Run
args.func(lowerCAmelCase )
if __name__ == "__main__":
main()
| 18 | 1 |
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
__lowerCamelCase : int = argparse.ArgumentParser(
description=(
'''Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'''
''' Distillation'''
)
)
parser.add_argument('''--model_type''', default='''bert''', choices=['''bert'''])
parser.add_argument('''--model_name''', default='''bert-base-uncased''', type=str)
parser.add_argument('''--dump_checkpoint''', default='''serialization_dir/tf_bert-base-uncased_0247911.pth''', type=str)
parser.add_argument('''--vocab_transform''', action='''store_true''')
__lowerCamelCase : Dict = parser.parse_args()
if args.model_type == "bert":
__lowerCamelCase : Any = BertForMaskedLM.from_pretrained(args.model_name)
__lowerCamelCase : Any = '''bert'''
else:
raise ValueError('''args.model_type should be "bert".''')
__lowerCamelCase : Dict = model.state_dict()
__lowerCamelCase : str = {}
for w in ["word_embeddings", "position_embeddings"]:
__lowerCamelCase : int = state_dict[f'''{prefix}.embeddings.{w}.weight''']
for w in ["weight", "bias"]:
__lowerCamelCase : Any = state_dict[f'''{prefix}.embeddings.LayerNorm.{w}''']
__lowerCamelCase : Tuple = 0
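# Copy six evenly spaced teacher layers (0, 2, 4, 7, 9, 11) into the six layers of the distilled student.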
for teacher_idx in [0, 2, 4, 7, 9, 11]:
for w in ["weight", "bias"]:
__lowerCamelCase : Any = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}'''
]
__lowerCamelCase : str = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}'''
]
__lowerCamelCase : str = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}'''
]
__lowerCamelCase : str = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}'''
]
__lowerCamelCase : Any = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}'''
]
__lowerCamelCase : List[Any] = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}'''
]
__lowerCamelCase : int = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}'''
]
__lowerCamelCase : Optional[Any] = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}'''
]
std_idx += 1
__lowerCamelCase : Optional[Any] = state_dict['''cls.predictions.decoder.weight''']
__lowerCamelCase : Optional[int] = state_dict['''cls.predictions.bias''']
if args.vocab_transform:
for w in ["weight", "bias"]:
__lowerCamelCase : str = state_dict[f'''cls.predictions.transform.dense.{w}''']
__lowerCamelCase : Optional[Any] = state_dict[f'''cls.predictions.transform.LayerNorm.{w}''']
print(f'''N layers selected for distillation: {std_idx}''')
print(f'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(f'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
| 18 | import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_char_small": (
            "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_chinese_char_base": (
            "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_discriminator": (
            "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_generator": (
            "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
        ),
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "junnyu/roformer_chinese_small": 1536,
    "junnyu/roformer_chinese_base": 1536,
    "junnyu/roformer_chinese_char_small": 512,
    "junnyu/roformer_chinese_char_base": 512,
    "junnyu/roformer_small_discriminator": 128,
    "junnyu/roformer_small_generator": 128,
}
PRETRAINED_INIT_CONFIGURATION = {
    "junnyu/roformer_chinese_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_base": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_base": {"do_lower_case": True},
    "junnyu/roformer_small_discriminator": {"do_lower_case": True},
    "junnyu/roformer_small_generator": {"do_lower_case": True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer
def __init__( self : List[str],_A : int=None,_A : int=None,_A : int=True,_A : List[Any]="[UNK]",_A : Tuple="[SEP]",_A : List[Any]="[PAD]",_A : Optional[int]="[CLS]",_A : Optional[Any]="[MASK]",_A : Optional[int]=True,_A : List[str]=None,**_A : List[Any],):
"""simple docstring"""
super().__init__(
_A,tokenizer_file=_A,do_lower_case=_A,unk_token=_A,sep_token=_A,pad_token=_A,cls_token=_A,mask_token=_A,tokenize_chinese_chars=_A,strip_accents=_A,**_A,)
SCREAMING_SNAKE_CASE_ : Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
pre_tok_state.get("lowercase",_A ) != do_lower_case
or pre_tok_state.get("strip_accents",_A ) != strip_accents
):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = getattr(_A,pre_tok_state.pop("type" ) )
SCREAMING_SNAKE_CASE_ : Any = do_lower_case
SCREAMING_SNAKE_CASE_ : List[str] = strip_accents
SCREAMING_SNAKE_CASE_ : str = pre_tok_class(**_A )
SCREAMING_SNAKE_CASE_ : List[str] = do_lower_case
def __getstate__( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = self.__dict__.copy()
SCREAMING_SNAKE_CASE_ : Optional[Any] = BertPreTokenizer()
return state
def __setstate__( self : List[Any],_A : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = d
SCREAMING_SNAKE_CASE_ : List[str] = self.__dict__["_tokenizer"].get_vocab()
SCREAMING_SNAKE_CASE_ : Any = PreTokenizer.custom(JiebaPreTokenizer(_A ) )
def __UpperCamelCase ( self : Union[str, Any],_A : List[Any],_A : str=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __UpperCamelCase ( self : str,_A : List[int],_A : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCamelCase ( self : int,_A : str,_A : Optional[str] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self._tokenizer.model.save(_A,name=_A )
return tuple(_A )
def __UpperCamelCase ( self : int,_A : Optional[int],_A : List[Any]=None,_A : Tuple=None,_A : str=False,**_A : List[Any],):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = BertPreTokenizer()
return super().save_pretrained(_A,_A,_A,_A,**_A )
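# Usage sketch (addition; assumes the published transformers API, where the
# class above ships as RoFormerTokenizerFast and downloads weights on demand):
# from transformers import RoFormerTokenizerFast
# tok = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
# print(tok.tokenize("今天天气非常好。"))  # jieba-based pre-tokenization splits words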
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
    '''configuration_ernie''': ['''ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ErnieConfig''', '''ErnieOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ernie"] = [
'''ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ErnieForCausalLM''',
'''ErnieForMaskedLM''',
'''ErnieForMultipleChoice''',
'''ErnieForNextSentencePrediction''',
'''ErnieForPreTraining''',
'''ErnieForQuestionAnswering''',
'''ErnieForSequenceClassification''',
'''ErnieForTokenClassification''',
'''ErnieModel''',
'''ErniePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
__lowerCamelCase : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
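# Sketch of the lazy-import idea above (addition; a simplified stand-in for
# transformers' _LazyModule, using PEP 562 module-level __getattr__):
# import importlib
# _lazy = {"loads": "json", "dumps": "json"}   # symbol -> providing module
# def __getattr__(name):
#     if name in _lazy:
#         return getattr(importlib.import_module(_lazy[name]), name)
#     raise AttributeError(f"module {__name__!r} has no attribute {name!r}")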
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester(object):
def __init__( self : Tuple,_A : Optional[int],_A : Any=13,_A : List[str]=7,_A : int=True,_A : Dict=True,_A : Dict=False,_A : List[Any]=True,_A : Any=99,_A : Optional[int]=32,_A : Any=5,_A : List[Any]=4,_A : Dict=64,_A : Optional[Any]="gelu",_A : Tuple=0.1,_A : Any=0.1,_A : List[Any]=512,_A : Dict=16,_A : Optional[Any]=2,_A : Union[str, Any]=0.02,_A : List[str]=3,_A : Optional[Any]=4,_A : Union[str, Any]=None,_A : Tuple=2,_A : List[str]=2,_A : str=2,_A : Dict=2,_A : Optional[Any]=4,_A : Union[str, Any]=1,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = parent
SCREAMING_SNAKE_CASE_ : Optional[int] = batch_size
SCREAMING_SNAKE_CASE_ : Dict = seq_length
SCREAMING_SNAKE_CASE_ : Dict = is_training
SCREAMING_SNAKE_CASE_ : Optional[int] = use_input_mask
SCREAMING_SNAKE_CASE_ : int = use_token_type_ids
SCREAMING_SNAKE_CASE_ : Optional[int] = use_labels
SCREAMING_SNAKE_CASE_ : Tuple = vocab_size
SCREAMING_SNAKE_CASE_ : Any = hidden_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE_ : List[Any] = intermediate_size
SCREAMING_SNAKE_CASE_ : List[str] = hidden_act
SCREAMING_SNAKE_CASE_ : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : Optional[int] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : str = max_position_embeddings
SCREAMING_SNAKE_CASE_ : str = type_vocab_size
SCREAMING_SNAKE_CASE_ : List[str] = type_sequence_label_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE_ : Tuple = num_labels
SCREAMING_SNAKE_CASE_ : List[Any] = num_choices
SCREAMING_SNAKE_CASE_ : Dict = scope
SCREAMING_SNAKE_CASE_ : int = q_groups
SCREAMING_SNAKE_CASE_ : Tuple = k_groups
SCREAMING_SNAKE_CASE_ : List[Any] = v_groups
SCREAMING_SNAKE_CASE_ : Tuple = post_attention_groups
SCREAMING_SNAKE_CASE_ : int = intermediate_groups
SCREAMING_SNAKE_CASE_ : List[Any] = output_groups
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
SCREAMING_SNAKE_CASE_ : List[Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ : Optional[Any] = None
SCREAMING_SNAKE_CASE_ : Any = None
SCREAMING_SNAKE_CASE_ : str = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : str = ids_tensor([self.batch_size],self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length],self.num_labels )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([self.batch_size],self.num_choices )
SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase ( self : str ):
"""simple docstring"""
return SqueezeBertConfig(
embedding_size=self.hidden_size,vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,attention_probs_dropout_prob=self.hidden_dropout_prob,attention_dropout=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,initializer_range=self.initializer_range,q_groups=self.q_groups,k_groups=self.k_groups,v_groups=self.v_groups,post_attention_groups=self.post_attention_groups,intermediate_groups=self.intermediate_groups,output_groups=self.output_groups,)
def __UpperCamelCase ( self : Tuple,_A : Union[str, Any],_A : Union[str, Any],_A : int,_A : Optional[int],_A : Union[str, Any],_A : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = SqueezeBertModel(config=_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : Any = model(_A,_A )
SCREAMING_SNAKE_CASE_ : List[str] = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self : Dict,_A : Any,_A : Tuple,_A : str,_A : Any,_A : Union[str, Any],_A : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = SqueezeBertForMaskedLM(config=_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : List[str] = model(_A,attention_mask=_A,labels=_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self : Optional[int],_A : Union[str, Any],_A : Union[str, Any],_A : Any,_A : Tuple,_A : int,_A : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = SqueezeBertForQuestionAnswering(config=_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = model(
_A,attention_mask=_A,start_positions=_A,end_positions=_A )
self.parent.assertEqual(result.start_logits.shape,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape,(self.batch_size, self.seq_length) )
def __UpperCamelCase ( self : List[Any],_A : List[str],_A : Tuple,_A : List[Any],_A : List[str],_A : List[str],_A : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.num_labels
SCREAMING_SNAKE_CASE_ : List[str] = SqueezeBertForSequenceClassification(_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = model(_A,attention_mask=_A,labels=_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : str,_A : Optional[int],_A : str,_A : List[Any],_A : List[str],_A : str,_A : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.num_labels
SCREAMING_SNAKE_CASE_ : Optional[int] = SqueezeBertForTokenClassification(config=_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : Optional[int] = model(_A,attention_mask=_A,labels=_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase ( self : List[Any],_A : Tuple,_A : str,_A : Optional[Any],_A : int,_A : str,_A : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self.num_choices
SCREAMING_SNAKE_CASE_ : Union[str, Any] = SqueezeBertForMultipleChoice(config=_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = input_ids.unsqueeze(1 ).expand(-1,self.num_choices,-1 ).contiguous()
SCREAMING_SNAKE_CASE_ : str = input_mask.unsqueeze(1 ).expand(-1,self.num_choices,-1 ).contiguous()
SCREAMING_SNAKE_CASE_ : Optional[int] = model(
_A,attention_mask=_A,labels=_A,)
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_choices) )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.prepare_config_and_inputs()
((SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_)) : Dict = config_and_inputs
SCREAMING_SNAKE_CASE_ : Dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
    pipeline_model_mapping = (
{
'feature-extraction': SqueezeBertModel,
'fill-mask': SqueezeBertForMaskedLM,
'question-answering': SqueezeBertForQuestionAnswering,
'text-classification': SqueezeBertForSequenceClassification,
'token-classification': SqueezeBertForTokenClassification,
'zero-shot': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = SqueezeBertModelTester(self )
SCREAMING_SNAKE_CASE_ : List[str] = ConfigTester(self,config_class=_A,dim=37 )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*_A )
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*_A )
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*_A )
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*_A )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*_A )
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*_A )
@slow
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ : Tuple = SqueezeBertModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase):
@slow
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli" )
        SCREAMING_SNAKE_CASE_ : Optional[int] = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]] )
SCREAMING_SNAKE_CASE_ : List[Any] = model(_A )[0]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.Size((1, 3) )
self.assertEqual(output.shape,_A )
SCREAMING_SNAKE_CASE_ : int = torch.tensor([[0.6401, -0.0349, -0.6041]] )
self.assertTrue(torch.allclose(_A,_A,atol=1E-4 ) )
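# How the multiple-choice check above tiles its inputs (addition): each
# (batch, seq) tensor becomes (batch, num_choices, seq) before the forward.
# import torch
# x = torch.arange(6).reshape(2, 3)                      # (batch=2, seq=3)
# tiled = x.unsqueeze(1).expand(-1, 4, -1).contiguous()  # (2, 4, 3)
# assert tiled.shape == (2, 4, 3)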
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}
class HerbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer
def __init__( self : Tuple,_A : int=None,_A : Optional[int]=None,_A : Optional[int]=None,_A : List[Any]="<s>",_A : int="<unk>",_A : Optional[Any]="<pad>",_A : int="<mask>",_A : Dict="</s>",**_A : Optional[int],):
"""simple docstring"""
super().__init__(
_A,_A,tokenizer_file=_A,cls_token=_A,unk_token=_A,pad_token=_A,mask_token=_A,sep_token=_A,**_A,)
def __UpperCamelCase ( self : List[Any],_A : List[int],_A : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = [self.cls_token_id]
SCREAMING_SNAKE_CASE_ : List[Any] = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __UpperCamelCase ( self : Any,_A : List[int],_A : Optional[List[int]] = None,_A : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A,token_ids_a=_A,already_has_special_tokens=_A )
if token_ids_a is None:
return [1] + ([0] * len(_A )) + [1]
return [1] + ([0] * len(_A )) + [1] + ([0] * len(_A )) + [1]
def __UpperCamelCase ( self : Dict,_A : List[int],_A : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCamelCase ( self : Union[str, Any],_A : str,_A : Optional[str] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = self._tokenizer.model.save(_A,name=_A )
return tuple(_A )
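# Layout produced by the two methods above, with hypothetical ids cls=0 and
# sep=2 and token sequences A=[5, 6], B=[7] (addition, for illustration):
#   single: [0, 5, 6, 2]            token_type_ids: [0, 0, 0, 0]
#   pair:   [0, 5, 6, 2, 7, 2]      token_type_ids: [0, 0, 0, 0, 1, 1]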
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _snake_case ( lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
assert isinstance(lowerCAmelCase , lowerCAmelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def _snake_case ( lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE_ : List[str] = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase , keep_in_memory=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def _snake_case ( lowerCAmelCase : Tuple , lowerCAmelCase : Dict , lowerCAmelCase : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Optional[Any] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : Any = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE_ : List[str] = (
Features({feature: Value(lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE_ : Optional[Any] = TextDatasetReader(lowerCAmelCase , features=lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def _snake_case ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : List[str] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : List[str] = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase , split=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def _snake_case ( lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : Optional[int] ):
"""simple docstring"""
if issubclass(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = text_path
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [text_path]
SCREAMING_SNAKE_CASE_ : int = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Optional[int] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : List[str] = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
def _snake_case ( lowerCAmelCase : Dict , lowerCAmelCase : Dict , lowerCAmelCase : List[str]=("train",) ):
"""simple docstring"""
assert isinstance(lowerCAmelCase , lowerCAmelCase )
for split in splits:
SCREAMING_SNAKE_CASE_ : int = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def _snake_case ( lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE_ : List[Any] = TextDatasetReader({"train": text_path} , cache_dir=lowerCAmelCase , keep_in_memory=lowerCAmelCase ).read()
_check_text_datasetdict(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def _snake_case ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = tmp_path / "cache"
# CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
SCREAMING_SNAKE_CASE_ : Tuple = {"text": "string"}
SCREAMING_SNAKE_CASE_ : Any = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE_ : Dict = (
Features({feature: Value(lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE_ : str = TextDatasetReader({"train": text_path} , features=lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_datasetdict(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def _snake_case ( lowerCAmelCase : Optional[Any] , lowerCAmelCase : Any , lowerCAmelCase : Dict ):
"""simple docstring"""
if split:
SCREAMING_SNAKE_CASE_ : Optional[int] = {split: text_path}
else:
SCREAMING_SNAKE_CASE_ : List[Any] = "train"
SCREAMING_SNAKE_CASE_ : Tuple = {"train": text_path, "test": text_path}
SCREAMING_SNAKE_CASE_ : Any = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : List[str] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : str = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_datasetdict(lowerCAmelCase , lowerCAmelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
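# The TextDatasetReader exercised above backs the public one-liner below
# (sketch; assumes the `datasets` package and a local newline-delimited file):
# from datasets import load_dataset
# ds = load_dataset("text", data_files={"train": "my_file.txt"})
# assert ds["train"].column_names == ["text"]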
def binary_insertion_sort(collection: list) -> list:
    """Sort in place: binary search finds the insertion point, then shift right."""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection

if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
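# Stdlib cross-check (addition): bisect implements the same binary search for
# the insertion point, so the two sorts must agree.
import bisect

def binary_insertion_sort_bisect(items: list) -> list:
    out: list = []
    for x in items:
        bisect.insort(out, x)  # binary search + insert, like the loop above
    return out

assert binary_insertion_sort_bisect([5, 2, 4, 1, 3]) == [1, 2, 3, 4, 5]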
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def _snake_case ( lowerCAmelCase : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = [tensor.shape for tensor in tensor_list]
return all(shape == shapes[0] for shape in shapes[1:] )
class a__ ( A__ , A__ , A__ , unittest.TestCase ):
A = StableDiffusionLatentUpscalePipeline
A = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'height',
'width',
'cross_attention_kwargs',
'negative_prompt_embeds',
'prompt_embeds',
}
A = PipelineTesterMixin.required_optional_params - {'num_images_per_prompt'}
A = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
A = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
A = frozenset([] )
A = True
@property
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = 1
SCREAMING_SNAKE_CASE_ : Optional[int] = 4
SCREAMING_SNAKE_CASE_ : Optional[int] = (16, 16)
SCREAMING_SNAKE_CASE_ : Dict = floats_tensor((batch_size, num_channels) + sizes,rng=random.Random(0 ) ).to(_A )
return image
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE_ : str = UNet2DConditionModel(
act_fn="gelu",attention_head_dim=8,norm_num_groups=_A,block_out_channels=[32, 32, 64, 64],time_cond_proj_dim=160,conv_in_kernel=1,conv_out_kernel=1,cross_attention_dim=32,down_block_types=(
"KDownBlock2D",
"KCrossAttnDownBlock2D",
"KCrossAttnDownBlock2D",
"KCrossAttnDownBlock2D",
),in_channels=8,mid_block_type=_A,only_cross_attention=_A,out_channels=5,resnet_time_scale_shift="scale_shift",time_embedding_type="fourier",timestep_post_act="gelu",up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"),)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 32, 64, 64],in_channels=3,out_channels=3,down_block_types=[
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
],up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],latent_channels=4,)
SCREAMING_SNAKE_CASE_ : int = EulerDiscreteScheduler(prediction_type="sample" )
SCREAMING_SNAKE_CASE_ : List[Any] = CLIPTextConfig(
bos_token_id=0,eos_token_id=2,hidden_size=32,intermediate_size=37,layer_norm_eps=1E-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=1000,hidden_act="quick_gelu",projection_dim=512,)
SCREAMING_SNAKE_CASE_ : Tuple = CLIPTextModel(_A )
SCREAMING_SNAKE_CASE_ : int = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {
"unet": model.eval(),
"vae": vae.eval(),
"scheduler": scheduler,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def __UpperCamelCase ( self : List[Any],_A : int,_A : Tuple=0 ):
"""simple docstring"""
if str(_A ).startswith("mps" ):
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.manual_seed(_A )
else:
SCREAMING_SNAKE_CASE_ : Dict = torch.Generator(device=_A ).manual_seed(_A )
SCREAMING_SNAKE_CASE_ : Tuple = {
"prompt": "A painting of a squirrel eating a burger",
"image": self.dummy_image.cpu(),
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = "cpu"
SCREAMING_SNAKE_CASE_ : List[str] = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : List[str] = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
SCREAMING_SNAKE_CASE_ : Tuple = self.get_dummy_inputs(_A )
SCREAMING_SNAKE_CASE_ : Dict = pipe(**_A ).images
SCREAMING_SNAKE_CASE_ : Optional[int] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape,(1, 256, 256, 3) )
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.array(
[0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055] )
SCREAMING_SNAKE_CASE_ : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_A,1E-3 )
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
super().test_attention_slicing_forward_pass(expected_max_diff=7E-3 )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
super().test_cpu_offload_forward_pass(expected_max_diff=3E-3 )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def __UpperCamelCase ( self : int ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=7E-3 )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3E-3 )
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
super().test_save_load_local(expected_max_difference=3E-3 )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
super().test_save_load_optional_components(expected_max_difference=3E-3 )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = [
"DDIMScheduler",
"DDPMScheduler",
"PNDMScheduler",
"HeunDiscreteScheduler",
"EulerAncestralDiscreteScheduler",
"KDPM2DiscreteScheduler",
"KDPM2AncestralDiscreteScheduler",
"DPMSolverSDEScheduler",
]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : Any = self.pipeline_class(**_A )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
SCREAMING_SNAKE_CASE_ : Any = self.get_dummy_inputs(_A )
SCREAMING_SNAKE_CASE_ : Optional[Any] = 2
SCREAMING_SNAKE_CASE_ : Optional[int] = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
# no sigma schedulers are not supported
# no schedulers
continue
SCREAMING_SNAKE_CASE_ : Tuple = getattr(_A,scheduler_enum.name )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = scheduler_cls.from_config(pipe.scheduler.config )
SCREAMING_SNAKE_CASE_ : Optional[int] = pipe(**_A )[0]
outputs.append(_A )
assert check_same_shape(_A )
@require_torch_gpu
@slow
class a__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = torch.manual_seed(33 )
        SCREAMING_SNAKE_CASE_ : Optional[int] = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4",torch_dtype=torch.float16 )
pipe.to("cuda" )
SCREAMING_SNAKE_CASE_ : List[Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
"stabilityai/sd-x2-latent-upscaler",torch_dtype=torch.floataa )
upscaler.to("cuda" )
SCREAMING_SNAKE_CASE_ : Tuple = "a photo of an astronaut high resolution, unreal engine, ultra realistic"
SCREAMING_SNAKE_CASE_ : str = pipe(_A,generator=_A,output_type="latent" ).images
SCREAMING_SNAKE_CASE_ : Optional[Any] = upscaler(
prompt=_A,image=_A,num_inference_steps=20,guidance_scale=0,generator=_A,output_type="np",).images[0]
SCREAMING_SNAKE_CASE_ : str = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy" )
assert np.abs((expected_image - image).mean() ) < 5E-2
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = torch.manual_seed(33 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
"stabilityai/sd-x2-latent-upscaler",torch_dtype=torch.floataa )
upscaler.to("cuda" )
SCREAMING_SNAKE_CASE_ : Any = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"
SCREAMING_SNAKE_CASE_ : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png" )
SCREAMING_SNAKE_CASE_ : str = upscaler(
prompt=_A,image=_A,num_inference_steps=20,guidance_scale=0,generator=_A,output_type="np",).images[0]
SCREAMING_SNAKE_CASE_ : Any = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy" )
assert np.abs((expected_image - image).max() ) < 5E-2
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}

def digits_fifth_powers_sum(number: int) -> int:
    """Sum of the fifth powers of the decimal digits of `number`."""
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))

def solution() -> int:
    """Sum of all numbers equal to the sum of fifth powers of their digits."""
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number)
    )

if __name__ == "__main__":
    print(solution())
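# Worked check (addition): 4150 = 4**5 + 1**5 + 5**5 + 0**5, so
# digits_fifth_powers_sum(4150) == 4150 and 4150 is counted by solution().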
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
__lowerCamelCase : Optional[int] = logging.get_logger(__name__)
__lowerCamelCase : Tuple = {
'''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig(PretrainedConfig):
    model_type = "perceiver"
def __init__( self : List[Any],_A : Tuple=256,_A : str=1280,_A : List[Any]=768,_A : Union[str, Any]=1,_A : Union[str, Any]=26,_A : List[str]=8,_A : List[Any]=8,_A : List[Any]=None,_A : List[Any]=None,_A : Union[str, Any]="kv",_A : Any=1,_A : int=1,_A : Dict="gelu",_A : Any=0.1,_A : int=0.02,_A : int=1E-12,_A : Any=True,_A : Optional[Any]=262,_A : List[Any]=2048,_A : str=56,_A : Optional[int]=[368, 496],_A : Dict=16,_A : Tuple=1920,_A : List[Any]=16,_A : str=[1, 16, 224, 224],**_A : Optional[Any],):
"""simple docstring"""
super().__init__(**_A )
SCREAMING_SNAKE_CASE_ : Dict = num_latents
SCREAMING_SNAKE_CASE_ : List[Any] = d_latents
SCREAMING_SNAKE_CASE_ : Union[str, Any] = d_model
SCREAMING_SNAKE_CASE_ : Optional[int] = num_blocks
SCREAMING_SNAKE_CASE_ : List[Any] = num_self_attends_per_block
SCREAMING_SNAKE_CASE_ : Tuple = num_self_attention_heads
SCREAMING_SNAKE_CASE_ : List[str] = num_cross_attention_heads
SCREAMING_SNAKE_CASE_ : List[Any] = qk_channels
SCREAMING_SNAKE_CASE_ : Any = v_channels
SCREAMING_SNAKE_CASE_ : Any = cross_attention_shape_for_attention
SCREAMING_SNAKE_CASE_ : List[str] = self_attention_widening_factor
SCREAMING_SNAKE_CASE_ : Any = cross_attention_widening_factor
SCREAMING_SNAKE_CASE_ : List[Any] = hidden_act
SCREAMING_SNAKE_CASE_ : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Any = initializer_range
SCREAMING_SNAKE_CASE_ : List[Any] = layer_norm_eps
SCREAMING_SNAKE_CASE_ : Tuple = use_query_residual
# masked language modeling attributes
SCREAMING_SNAKE_CASE_ : List[str] = vocab_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = max_position_embeddings
# image classification attributes
SCREAMING_SNAKE_CASE_ : Dict = image_size
# flow attributes
SCREAMING_SNAKE_CASE_ : List[Any] = train_size
# multimodal autoencoding attributes
SCREAMING_SNAKE_CASE_ : str = num_frames
SCREAMING_SNAKE_CASE_ : Any = audio_samples_per_frame
SCREAMING_SNAKE_CASE_ : Tuple = samples_per_patch
SCREAMING_SNAKE_CASE_ : Optional[Any] = output_shape
class PerceiverOnnxConfig(OnnxConfig):
@property
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE_ : List[str] = {0: "batch", 1: "choice", 2: "sequence"}
else:
SCREAMING_SNAKE_CASE_ : str = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("inputs", dynamic_axis),
("attention_mask", dynamic_axis),
] )
@property
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
return 1E-4
def __UpperCamelCase ( self : List[str],_A : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],_A : int = -1,_A : int = -1,_A : int = -1,_A : bool = False,_A : Optional[TensorType] = None,_A : int = 3,_A : int = 40,_A : int = 40,):
"""simple docstring"""
if isinstance(_A,_A ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE_ : Tuple = compute_effective_axis_dimension(
_A,fixed_dimension=OnnxConfig.default_fixed_batch,num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE_ : Tuple = preprocessor.num_special_tokens_to_add(_A )
SCREAMING_SNAKE_CASE_ : Any = compute_effective_axis_dimension(
_A,fixed_dimension=OnnxConfig.default_fixed_sequence,num_token_to_add=_A )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE_ : Optional[Any] = [" ".join(["a"] ) * seq_length] * batch_size
SCREAMING_SNAKE_CASE_ : str = dict(preprocessor(_A,return_tensors=_A ) )
SCREAMING_SNAKE_CASE_ : List[str] = inputs.pop("input_ids" )
return inputs
elif isinstance(_A,_A ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE_ : Any = compute_effective_axis_dimension(_A,fixed_dimension=OnnxConfig.default_fixed_batch )
SCREAMING_SNAKE_CASE_ : Optional[int] = self._generate_dummy_images(_A,_A,_A,_A )
SCREAMING_SNAKE_CASE_ : Any = dict(preprocessor(images=_A,return_tensors=_A ) )
SCREAMING_SNAKE_CASE_ : Any = inputs.pop("pixel_values" )
return inputs
else:
raise ValueError(
"Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor." )
from __future__ import annotations

END = "#"

class Trie:
    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        """Insert `text`, marking the terminal node with the END sentinel."""
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix: str) -> tuple | list:
        """Return all stored suffixes below `prefix` (empty list if absent)."""
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)

trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
    trie.insert_word(word)

def autocomplete_using_trie(string: str) -> tuple:
    """Complete `string` with every word in the trie that extends it."""
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)

def main() -> None:
    print(autocomplete_using_trie("de"))

if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()
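# Expected output of main() above (addition): dicts preserve insertion order,
# and the END sentinel contributes the trailing space in each completion:
# ('depart ', 'detergent ', 'deer ', 'deal ')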
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
def __UpperCamelCase ( self : str,_A : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = [self.constructed_objects[key_node] for key_node, _ in node.value]
SCREAMING_SNAKE_CASE_ : List[str] = [tuple(_A ) if isinstance(_A,_A ) else key for key in keys]
SCREAMING_SNAKE_CASE_ : Optional[int] = Counter(_A )
SCREAMING_SNAKE_CASE_ : Tuple = [key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(F'Got duplicate yaml keys: {duplicate_keys}' )
def __UpperCamelCase ( self : Tuple,_A : Dict,_A : List[Any]=False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = super().construct_mapping(_A,deep=_A )
self._check_no_duplicates_on_constructed_node(_A )
return mapping
def _split_yaml_from_readme(readme_content: str):
    """Split a README into its YAML front-matter block and the remaining body."""
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)
class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {'train_eval_index'}  # train-eval-index in the YAML metadata
@classmethod
def __UpperCamelCase ( cls : Any,_A : Path ):
"""simple docstring"""
with open(_A,encoding="utf-8" ) as readme_file:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = _split_yaml_from_readme(readme_file.read() )
if yaml_string is not None:
return cls.from_yaml_string(_A )
else:
return cls()
def __UpperCamelCase ( self : Dict,_A : Path ):
"""simple docstring"""
if path.exists():
with open(_A,encoding="utf-8" ) as readme_file:
SCREAMING_SNAKE_CASE_ : int = readme_file.read()
else:
SCREAMING_SNAKE_CASE_ : Any = None
SCREAMING_SNAKE_CASE_ : int = self._to_readme(_A )
with open(_A,"w",encoding="utf-8" ) as readme_file:
readme_file.write(_A )
def __UpperCamelCase ( self : Optional[int],_A : Optional[str] = None ):
"""simple docstring"""
if readme_content is not None:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = _split_yaml_from_readme(_A )
SCREAMING_SNAKE_CASE_ : Tuple = "---\n" + self.to_yaml_string() + "---\n" + content
else:
SCREAMING_SNAKE_CASE_ : Dict = "---\n" + self.to_yaml_string() + "---\n"
return full_content
@classmethod
def __UpperCamelCase ( cls : Dict,_A : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = yaml.load(_A,Loader=_NoDuplicateSafeLoader ) or {}
# Convert the YAML keys to DatasetMetadata fields
SCREAMING_SNAKE_CASE_ : Any = {
(key.replace("-","_" ) if key.replace("-","_" ) in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
return cls(**_A )
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
return yaml.safe_dump(
{
(key.replace("_","-" ) if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
},sort_keys=_A,allow_unicode=_A,encoding="utf-8",).decode("utf-8" )
__lowerCamelCase : List[Any] = {
'''image-classification''': [],
'''translation''': [],
'''image-segmentation''': [],
'''fill-mask''': [],
'''automatic-speech-recognition''': [],
'''token-classification''': [],
'''sentence-similarity''': [],
'''audio-classification''': [],
'''question-answering''': [],
'''summarization''': [],
'''zero-shot-classification''': [],
'''table-to-text''': [],
'''feature-extraction''': [],
'''other''': [],
'''multiple-choice''': [],
'''text-classification''': [],
'''text-to-image''': [],
'''text2text-generation''': [],
'''zero-shot-image-classification''': [],
'''tabular-classification''': [],
'''tabular-regression''': [],
'''image-to-image''': [],
'''tabular-to-text''': [],
'''unconditional-image-generation''': [],
'''text-retrieval''': [],
'''text-to-speech''': [],
'''object-detection''': [],
'''audio-to-audio''': [],
'''text-generation''': [],
'''conversational''': [],
'''table-question-answering''': [],
'''visual-question-answering''': [],
'''image-to-text''': [],
'''reinforcement-learning''': [],
'''voice-activity-detection''': [],
'''time-series-forecasting''': [],
'''document-question-answering''': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
__lowerCamelCase : List[Any] = ArgumentParser(usage='''Validate the yaml metadata block of a README.md file.''')
ap.add_argument('''readme_filepath''')
__lowerCamelCase : Dict = ap.parse_args()
__lowerCamelCase : List[Any] = Path(args.readme_filepath)
__lowerCamelCase : Optional[int] = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
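# Worked example of the front-matter split above (addition):
# _split_yaml_from_readme("---\nlicense: mit\n---\n# My dataset")
# returns ("license: mit", "# My dataset") — the YAML between the first two
# "---" lines, then the remaining markdown body.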
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
@slow
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = TFCamembertModel.from_pretrained("jplu/tf-camembert-base" )
SCREAMING_SNAKE_CASE_ : Dict = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],dtype=tf.int32,) # J'aime le camembert !"
SCREAMING_SNAKE_CASE_ : Any = model(_A )["last_hidden_state"]
SCREAMING_SNAKE_CASE_ : Tuple = tf.TensorShape((1, 10, 768) )
self.assertEqual(output.shape,_A )
# compare the actual values for a slice.
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],dtype=tf.float32,)
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy(),expected_slice.numpy(),atol=1E-4 ) )
from __future__ import annotations
from math import pi, sqrt

def resonant_frequency(inductance: float, capacitance: float) -> tuple[str, float]:
    """Resonant frequency of an LC circuit: f = 1 / (2*pi*sqrt(L*C))."""
if inductance <= 0:
raise ValueError("Inductance cannot be 0 or negative" )
elif capacitance <= 0:
raise ValueError("Capacitance cannot be 0 or negative" )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
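# Worked example (addition): with L = 10 mH and C = 5 uF,
# f = 1 / (2*pi*sqrt(10e-3 * 5e-6)) ~= 711.76 Hz, so
# resonant_frequency(10e-3, 5e-6) -> ("Resonant frequency", 711.76...)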
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
FILES_TO_FIND = [
    "kernels/rwkv/wkv_cuda.cu",
    "kernels/rwkv/wkv_op.cpp",
    "kernels/deformable_detr/ms_deform_attn.h",
    "kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh",
    "models/graphormer/algos_graphormer.pyx",
]

def test_custom_files_are_present(transformers_path: Path) -> bool:
    """Return True only if every expected custom source file exists."""
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.")
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module("transformers")
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / "build/lib/transformers"
    if not test_custom_files_are_present(transformers_path):
        raise ValueError("The built release does not contain the custom files. Fix this before going further!")
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
logger = logging.get_logger(__name__)

class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
from collections.abc import Sequence
from queue import Queue

class SegmentTreeNode:
    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self):
        return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"

class SegmentTree:
    def __init__(self, collection: Sequence, function):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        """Update a single element and propagate the change upward."""
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        """Fold self.fn over the inclusive index range [i, j]."""
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left, i, j)
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left, i, node.mid),
                    self._query_range(node.right, node.mid + 1, j),
                )
        else:
            # range in right child tree
            return self._query_range(node.right, i, j)

    def traverse(self):
        """Breadth-first traversal of the tree nodes."""
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('''*''' * 50)
__lowerCamelCase : int = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
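# Complexity note (addition): _build_tree visits each element once (O(n));
# update and query_range each walk one root-to-leaf path, so both are O(log n).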
| 18 | 1 |
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _snake_case ( lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
assert isinstance(lowerCAmelCase , lowerCAmelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def _snake_case ( lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE_ : List[str] = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase , keep_in_memory=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def _snake_case ( lowerCAmelCase : Tuple , lowerCAmelCase : Dict , lowerCAmelCase : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Optional[Any] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : Any = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE_ : List[str] = (
Features({feature: Value(lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE_ : Optional[Any] = TextDatasetReader(lowerCAmelCase , features=lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def _snake_case ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : List[str] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : List[str] = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase , split=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def _snake_case ( lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : Optional[int] ):
"""simple docstring"""
if issubclass(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = text_path
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [text_path]
SCREAMING_SNAKE_CASE_ : int = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Optional[int] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : List[str] = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
def _snake_case ( lowerCAmelCase : Dict , lowerCAmelCase : Dict , lowerCAmelCase : List[str]=("train",) ):
"""simple docstring"""
assert isinstance(lowerCAmelCase , lowerCAmelCase )
for split in splits:
SCREAMING_SNAKE_CASE_ : int = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def _snake_case ( lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE_ : List[Any] = TextDatasetReader({"train": text_path} , cache_dir=lowerCAmelCase , keep_in_memory=lowerCAmelCase ).read()
_check_text_datasetdict(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize(
    "features" , [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ] , )
def _snake_case ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = tmp_path / "cache"
    # the text reader always exposes a single "text" column, with "string" as the default dtype
SCREAMING_SNAKE_CASE_ : Tuple = {"text": "string"}
SCREAMING_SNAKE_CASE_ : Any = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE_ : Dict = (
Features({feature: Value(lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE_ : str = TextDatasetReader({"train": text_path} , features=lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_datasetdict(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def _snake_case ( lowerCAmelCase : Optional[Any] , lowerCAmelCase : Any , lowerCAmelCase : Dict ):
"""simple docstring"""
if split:
SCREAMING_SNAKE_CASE_ : Optional[int] = {split: text_path}
else:
SCREAMING_SNAKE_CASE_ : List[Any] = "train"
SCREAMING_SNAKE_CASE_ : Tuple = {"train": text_path, "test": text_path}
SCREAMING_SNAKE_CASE_ : Any = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : List[str] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : str = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_datasetdict(lowerCAmelCase , lowerCAmelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
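# A minimal usage sketch of the reader exercised throughout these tests, assuming
# TextDatasetReader wraps datasets.load_dataset("text", ...); the file path and
# cache directory below are placeholders:
#
#     dataset = TextDatasetReader("data.txt", cache_dir="cache").read()
#     assert dataset.column_names == ["text"]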
| 18 | def _snake_case ( lowerCAmelCase : int , lowerCAmelCase : int ):
"""simple docstring"""
while b:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = b, a % b
return a
def _snake_case ( lowerCAmelCase : int , lowerCAmelCase : int ):
"""simple docstring"""
return a if b == 0 else euclidean_gcd_recursive(lowerCAmelCase , a % b )
def _snake_case ( ):
"""simple docstring"""
print(f'euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}' )
print(f'euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}' )
print(f'euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}' )
print(f'euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}' )
print(f'euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}' )
print(f'euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}' )
print(f'euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}' )
print(f'euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}' )
print(f'euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}' )
print(f'euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}' )
if __name__ == "__main__":
main()
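# A readable, deobfuscated sketch of the two GCD helpers above (the function and
# parameter names here are illustrative; the originals take the same integer pair):
def euclidean_gcd_sketch(a: int, b: int) -> int:
    # iteratively replace (a, b) with (b, a mod b) until the remainder is 0
    while b:
        a, b = b, a % b
    return a

assert euclidean_gcd_sketch(3, 6) == euclidean_gcd_sketch(6, 3) == 3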
| 18 | 1 |
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def _snake_case ( lowerCAmelCase : Tuple , lowerCAmelCase : Any , lowerCAmelCase : int , lowerCAmelCase : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = s.rsplit(lowerCAmelCase , lowerCAmelCase )
return new.join(lowerCAmelCase )
def _snake_case ( lowerCAmelCase : int ):
"""simple docstring"""
return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items() )
def _snake_case ( lowerCAmelCase : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = {}
SCREAMING_SNAKE_CASE_ : int = ["group_1", "group_2", "group_3", "group_4"]
for key, value in state_dict.items():
for group_key in group_keys:
if group_key in key:
SCREAMING_SNAKE_CASE_ : Optional[int] = key.replace(f'{group_key}.' , f'{group_key}.group.' )
if "res_path" in key:
SCREAMING_SNAKE_CASE_ : Optional[Any] = key.replace("res_path." , "res_path.path." )
if key.endswith(".w" ):
SCREAMING_SNAKE_CASE_ : int = rreplace(lowerCAmelCase , ".w" , ".weight" , 1 )
if key.endswith(".b" ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = rreplace(lowerCAmelCase , ".b" , ".bias" , 1 )
SCREAMING_SNAKE_CASE_ : Dict = value.float()
return upgrade
@torch.no_grad()
def _snake_case ( lowerCAmelCase : List[Any] , lowerCAmelCase : List[str] , lowerCAmelCase : Any=None , lowerCAmelCase : Optional[int]=True ):
"""simple docstring"""
from dall_e import Encoder
SCREAMING_SNAKE_CASE_ : Dict = Encoder()
if os.path.exists(lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Dict = torch.load(lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE_ : Dict = torch.hub.load_state_dict_from_url(lowerCAmelCase )
if isinstance(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : List[Any] = ckpt.state_dict()
encoder.load_state_dict(lowerCAmelCase )
if config_path is not None:
SCREAMING_SNAKE_CASE_ : List[str] = FlavaImageCodebookConfig.from_pretrained(lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE_ : Optional[Any] = FlavaImageCodebookConfig()
SCREAMING_SNAKE_CASE_ : str = FlavaImageCodebook(lowerCAmelCase ).eval()
SCREAMING_SNAKE_CASE_ : Optional[Any] = encoder.state_dict()
SCREAMING_SNAKE_CASE_ : str = upgrade_state_dict(lowerCAmelCase )
hf_model.load_state_dict(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = hf_model.state_dict()
SCREAMING_SNAKE_CASE_ : Optional[Any] = count_parameters(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Any = count_parameters(lowerCAmelCase )
assert torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-3 )
if save_checkpoint:
hf_model.save_pretrained(lowerCAmelCase )
else:
return hf_state_dict
if __name__ == "__main__":
__lowerCamelCase : int = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to flava checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
__lowerCamelCase : List[Any] = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
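# Hypothetical invocation of the conversion entry point above (the script name and
# all paths are placeholders; only the flags defined by the argparse setup are used):
#
#     python convert_dalle_to_flava_codebook.py \
#         --checkpoint_path ./dall_e_encoder.pkl \
#         --pytorch_dump_folder_path ./flava-image-codebook \
#         --config_path ./config.json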
| 18 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__lowerCamelCase : Dict = {'''configuration_vit_mae''': ['''VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMAEConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Any = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Optional[Any] = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
__lowerCamelCase : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
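# With the _LazyModule wiring above, the heavy submodules are only imported when an
# attribute is first accessed; e.g. (a sketch, assuming torch is installed):
#
#     from transformers.models.vit_mae import ViTMAEConfig, ViTMAEModel
#     model = ViTMAEModel(ViTMAEConfig())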
| 18 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class a__ ( unittest.TestCase ):
def __init__( self : List[Any],_A : Any,_A : Union[str, Any]=7,_A : Optional[Any]=3,_A : Optional[int]=10,_A : Optional[int]=18,_A : Optional[Any]=30,_A : Optional[Any]=400,_A : int=True,_A : Dict=None,_A : Any=True,_A : List[Any]=[0.5, 0.5, 0.5],_A : Any=[0.5, 0.5, 0.5],_A : str=None,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = size if size is not None else {"shortest_edge": 18}
SCREAMING_SNAKE_CASE_ : str = crop_size if crop_size is not None else {"height": 18, "width": 18}
SCREAMING_SNAKE_CASE_ : List[Any] = parent
SCREAMING_SNAKE_CASE_ : Union[str, Any] = batch_size
SCREAMING_SNAKE_CASE_ : Any = num_channels
SCREAMING_SNAKE_CASE_ : Any = num_frames
SCREAMING_SNAKE_CASE_ : Tuple = image_size
SCREAMING_SNAKE_CASE_ : Any = min_resolution
SCREAMING_SNAKE_CASE_ : Dict = max_resolution
SCREAMING_SNAKE_CASE_ : Union[str, Any] = do_resize
SCREAMING_SNAKE_CASE_ : int = size
SCREAMING_SNAKE_CASE_ : Dict = do_normalize
SCREAMING_SNAKE_CASE_ : Optional[Any] = image_mean
SCREAMING_SNAKE_CASE_ : Tuple = image_std
SCREAMING_SNAKE_CASE_ : List[str] = crop_size
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class a__ ( A__ , unittest.TestCase ):
A = VivitImageProcessor if is_vision_available() else None
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = VivitImageProcessingTester(self )
@property
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A,"image_mean" ) )
self.assertTrue(hasattr(_A,"image_std" ) )
self.assertTrue(hasattr(_A,"do_normalize" ) )
self.assertTrue(hasattr(_A,"do_resize" ) )
self.assertTrue(hasattr(_A,"do_center_crop" ) )
self.assertTrue(hasattr(_A,"size" ) )
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size,{"shortest_edge": 18} )
self.assertEqual(image_processor.crop_size,{"height": 18, "width": 18} )
SCREAMING_SNAKE_CASE_ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict,size=42,crop_size=84 )
self.assertEqual(image_processor.size,{"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size,{"height": 84, "width": 84} )
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL videos
SCREAMING_SNAKE_CASE_ : List[str] = prepare_video_inputs(self.image_processor_tester,equal_resolution=_A )
for video in video_inputs:
self.assertIsInstance(_A,_A )
self.assertIsInstance(video[0],Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE_ : int = image_processing(video_inputs[0],return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape,(
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
),)
# Test batched
SCREAMING_SNAKE_CASE_ : Tuple = image_processing(_A,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
),)
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE_ : str = prepare_video_inputs(self.image_processor_tester,equal_resolution=_A,numpify=_A )
for video in video_inputs:
self.assertIsInstance(_A,_A )
self.assertIsInstance(video[0],np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(video_inputs[0],return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape,(
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
),)
# Test batched
SCREAMING_SNAKE_CASE_ : List[str] = image_processing(_A,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
),)
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE_ : Dict = prepare_video_inputs(self.image_processor_tester,equal_resolution=_A,torchify=_A )
for video in video_inputs:
self.assertIsInstance(_A,_A )
self.assertIsInstance(video[0],torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE_ : List[str] = image_processing(video_inputs[0],return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape,(
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
),)
# Test batched
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(_A,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
),)
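# A short usage sketch of the processor exercised above (mirroring the test calls;
# `video` stands for a list of frames, e.g. PIL images or numpy arrays):
#
#     processor = VivitImageProcessor()
#     pixel_values = processor(video, return_tensors="pt").pixel_values  # (1, T, C, H, W)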
| 18 | import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def _snake_case ( lowerCAmelCase : int , lowerCAmelCase : str , lowerCAmelCase : str , lowerCAmelCase : Path , lowerCAmelCase : str = None , lowerCAmelCase : str = None , lowerCAmelCase : str = None , ):
"""simple docstring"""
if config_name_or_path is None:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"
if generator_tokenizer_name_or_path is None:
SCREAMING_SNAKE_CASE_ : Dict = generator_name_or_path
if question_encoder_tokenizer_name_or_path is None:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = question_encoder_name_or_path
SCREAMING_SNAKE_CASE_ : Union[str, Any] = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration
# Save model.
SCREAMING_SNAKE_CASE_ : List[Any] = RagConfig.from_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Tuple = AutoConfig.from_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : int = AutoConfig.from_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = gen_config
SCREAMING_SNAKE_CASE_ : Optional[Any] = question_encoder_config
SCREAMING_SNAKE_CASE_ : Dict = model_class.from_pretrained_question_encoder_generator(
lowerCAmelCase , lowerCAmelCase , config=lowerCAmelCase )
rag_model.save_pretrained(lowerCAmelCase )
# Sanity check.
model_class.from_pretrained(lowerCAmelCase )
# Save tokenizers.
SCREAMING_SNAKE_CASE_ : Optional[Any] = AutoTokenizer.from_pretrained(lowerCAmelCase )
gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/" )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AutoTokenizer.from_pretrained(lowerCAmelCase )
question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/" )
if __name__ == "__main__":
__lowerCamelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''',
choices=['''rag_sequence''', '''rag_token'''],
required=True,
type=str,
help='''RAG model type: rag_sequence, rag_token''',
)
parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''')
parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''')
parser.add_argument(
'''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier'''
)
parser.add_argument(
'''--generator_tokenizer_name_or_path''',
type=str,
help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''',
)
parser.add_argument(
'''--question_encoder_tokenizer_name_or_path''',
type=str,
help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''',
)
parser.add_argument(
'''--config_name_or_path''',
type=str,
help=(
'''Identifier of the model config to use, if not provided, resolves to a base config for a given'''
''' ``model_type``'''
),
)
__lowerCamelCase : str = parser.parse_args()
__lowerCamelCase : int = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
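# Hypothetical invocation of the consolidation script above (the script name is
# assumed; the model identifiers are public checkpoints used only as examples):
#
#     python consolidate_rag_checkpoint.py \
#         --model_type rag_sequence \
#         --generator_name_or_path facebook/bart-large-cnn \
#         --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#         --dest ./rag-sequence-checkpoint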
| 18 | 1 |
import math
__lowerCamelCase : List[str] = 10
__lowerCamelCase : str = 7
__lowerCamelCase : Any = BALLS_PER_COLOUR * NUM_COLOURS
def _snake_case ( lowerCAmelCase : int = 2_0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = math.comb(lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[Any] = math.comb(NUM_BALLS - BALLS_PER_COLOUR , lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Any = NUM_COLOURS * (1 - missing_colour / total)
return f'{result:.9f}'
if __name__ == "__main__":
print(solution(20))
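# Sanity check of the closed form above: by linearity of expectation each of the
# 7 colours is missing from a 20-ball draw with probability C(60, 20) / C(70, 20),
# so the expected number of distinct colours is 7 * (1 - C(60, 20) / C(70, 20)),
# which matches Project Euler #493's published answer:
assert solution(20) == "6.818741802"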
| 18 | import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class a__ ( unittest.TestCase ):
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = {
"task_specific_params": {
"summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
"summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
"summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
}
}
SCREAMING_SNAKE_CASE_ : Any = {
"task_specific_params.summarization.length_penalty": 1.0,
"task_specific_params.summarization.max_length": 128,
"task_specific_params.summarization.min_length": 12,
"task_specific_params.summarization.num_beams": 4,
"task_specific_params.summarization_cnn.length_penalty": 2.0,
"task_specific_params.summarization_cnn.max_length": 142,
"task_specific_params.summarization_cnn.min_length": 56,
"task_specific_params.summarization_cnn.num_beams": 4,
"task_specific_params.summarization_xsum.length_penalty": 1.0,
"task_specific_params.summarization_xsum.max_length": 62,
"task_specific_params.summarization_xsum.min_length": 11,
"task_specific_params.summarization_xsum.num_beams": 6,
}
self.assertEqual(flatten_dict(_A ),_A )
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = np.random.randn(3,4 )
self.assertTrue(np.allclose(transpose(_A ),x.transpose() ) )
SCREAMING_SNAKE_CASE_ : Tuple = np.random.randn(3,4,5 )
self.assertTrue(np.allclose(transpose(_A,axes=(1, 2, 0) ),x.transpose((1, 2, 0) ) ) )
@require_torch
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : Any = torch.tensor(_A )
self.assertTrue(np.allclose(transpose(_A ),transpose(_A ).numpy() ) )
SCREAMING_SNAKE_CASE_ : Dict = np.random.randn(3,4,5 )
SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor(_A )
self.assertTrue(np.allclose(transpose(_A,axes=(1, 2, 0) ),transpose(_A,axes=(1, 2, 0) ).numpy() ) )
@require_tf
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = tf.constant(_A )
self.assertTrue(np.allclose(transpose(_A ),transpose(_A ).numpy() ) )
SCREAMING_SNAKE_CASE_ : List[str] = np.random.randn(3,4,5 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.constant(_A )
self.assertTrue(np.allclose(transpose(_A,axes=(1, 2, 0) ),transpose(_A,axes=(1, 2, 0) ).numpy() ) )
@require_flax
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : Tuple = jnp.array(_A )
self.assertTrue(np.allclose(transpose(_A ),np.asarray(transpose(_A ) ) ) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.random.randn(3,4,5 )
SCREAMING_SNAKE_CASE_ : List[Any] = jnp.array(_A )
self.assertTrue(np.allclose(transpose(_A,axes=(1, 2, 0) ),np.asarray(transpose(_A,axes=(1, 2, 0) ) ) ) )
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = np.random.randn(3,4 )
self.assertTrue(np.allclose(reshape(_A,(4, 3) ),np.reshape(_A,(4, 3) ) ) )
SCREAMING_SNAKE_CASE_ : Any = np.random.randn(3,4,5 )
self.assertTrue(np.allclose(reshape(_A,(12, 5) ),np.reshape(_A,(12, 5) ) ) )
@require_torch
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor(_A )
self.assertTrue(np.allclose(reshape(_A,(4, 3) ),reshape(_A,(4, 3) ).numpy() ) )
SCREAMING_SNAKE_CASE_ : Dict = np.random.randn(3,4,5 )
SCREAMING_SNAKE_CASE_ : int = torch.tensor(_A )
self.assertTrue(np.allclose(reshape(_A,(12, 5) ),reshape(_A,(12, 5) ).numpy() ) )
@require_tf
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = tf.constant(_A )
self.assertTrue(np.allclose(reshape(_A,(4, 3) ),reshape(_A,(4, 3) ).numpy() ) )
SCREAMING_SNAKE_CASE_ : int = np.random.randn(3,4,5 )
SCREAMING_SNAKE_CASE_ : Any = tf.constant(_A )
self.assertTrue(np.allclose(reshape(_A,(12, 5) ),reshape(_A,(12, 5) ).numpy() ) )
@require_flax
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : int = jnp.array(_A )
self.assertTrue(np.allclose(reshape(_A,(4, 3) ),np.asarray(reshape(_A,(4, 3) ) ) ) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.random.randn(3,4,5 )
SCREAMING_SNAKE_CASE_ : Tuple = jnp.array(_A )
self.assertTrue(np.allclose(reshape(_A,(12, 5) ),np.asarray(reshape(_A,(12, 5) ) ) ) )
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = np.random.randn(1,3,4 )
self.assertTrue(np.allclose(squeeze(_A ),np.squeeze(_A ) ) )
SCREAMING_SNAKE_CASE_ : Any = np.random.randn(1,4,1,5 )
self.assertTrue(np.allclose(squeeze(_A,axis=2 ),np.squeeze(_A,axis=2 ) ) )
@require_torch
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = np.random.randn(1,3,4 )
SCREAMING_SNAKE_CASE_ : Any = torch.tensor(_A )
self.assertTrue(np.allclose(squeeze(_A ),squeeze(_A ).numpy() ) )
SCREAMING_SNAKE_CASE_ : Any = np.random.randn(1,4,1,5 )
SCREAMING_SNAKE_CASE_ : Dict = torch.tensor(_A )
self.assertTrue(np.allclose(squeeze(_A,axis=2 ),squeeze(_A,axis=2 ).numpy() ) )
@require_tf
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = np.random.randn(1,3,4 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.constant(_A )
self.assertTrue(np.allclose(squeeze(_A ),squeeze(_A ).numpy() ) )
SCREAMING_SNAKE_CASE_ : Any = np.random.randn(1,4,1,5 )
SCREAMING_SNAKE_CASE_ : Optional[int] = tf.constant(_A )
self.assertTrue(np.allclose(squeeze(_A,axis=2 ),squeeze(_A,axis=2 ).numpy() ) )
@require_flax
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = np.random.randn(1,3,4 )
SCREAMING_SNAKE_CASE_ : List[str] = jnp.array(_A )
self.assertTrue(np.allclose(squeeze(_A ),np.asarray(squeeze(_A ) ) ) )
SCREAMING_SNAKE_CASE_ : str = np.random.randn(1,4,1,5 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = jnp.array(_A )
self.assertTrue(np.allclose(squeeze(_A,axis=2 ),np.asarray(squeeze(_A,axis=2 ) ) ) )
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = np.random.randn(3,4 )
self.assertTrue(np.allclose(expand_dims(_A,axis=1 ),np.expand_dims(_A,axis=1 ) ) )
@require_torch
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor(_A )
self.assertTrue(np.allclose(expand_dims(_A,axis=1 ),expand_dims(_A,axis=1 ).numpy() ) )
@require_tf
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : Optional[int] = tf.constant(_A )
self.assertTrue(np.allclose(expand_dims(_A,axis=1 ),expand_dims(_A,axis=1 ).numpy() ) )
@require_flax
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = jnp.array(_A )
self.assertTrue(np.allclose(expand_dims(_A,axis=1 ),np.asarray(expand_dims(_A,axis=1 ) ) ) )
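# The expected mapping in the first test of this class illustrates the contract of
# flatten_dict: nested keys are joined with ".", e.g. (a minimal sketch)
#
#     flatten_dict({"a": {"b": 1}})  # -> {"a.b": 1}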
| 18 | 1 |
from math import factorial
class a__ :
def __init__( self : List[str],_A : Tuple,_A : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = real
if isinstance(_A,_A ):
SCREAMING_SNAKE_CASE_ : Tuple = [1] * rank
else:
SCREAMING_SNAKE_CASE_ : Optional[Any] = rank
def __repr__( self : List[Any] ):
"""simple docstring"""
return (
F'{self.real}+'
F'{"+".join(str(_A )+"E"+str(n+1 )for n,dual in enumerate(self.duals ) )}'
)
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.duals.copy()
while cur[-1] == 0:
cur.pop(-1 )
return Dual(self.real,_A )
def __add__( self : Any,_A : List[Any] ):
"""simple docstring"""
if not isinstance(_A,_A ):
return Dual(self.real + other,self.duals )
SCREAMING_SNAKE_CASE_ : str = self.duals.copy()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = other.duals.copy()
if len(_A ) > len(_A ):
o_dual.extend([1] * (len(_A ) - len(_A )) )
elif len(_A ) < len(_A ):
s_dual.extend([1] * (len(_A ) - len(_A )) )
SCREAMING_SNAKE_CASE_ : int = []
for i in range(len(_A ) ):
new_duals.append(s_dual[i] + o_dual[i] )
return Dual(self.real + other.real,_A )
A = __add__
def __sub__( self : Any,_A : Union[str, Any] ):
"""simple docstring"""
return self + other * -1
def __mul__( self : Tuple,_A : Any ):
"""simple docstring"""
if not isinstance(_A,_A ):
SCREAMING_SNAKE_CASE_ : int = []
for i in self.duals:
new_duals.append(i * other )
return Dual(self.real * other,_A )
SCREAMING_SNAKE_CASE_ : str = [0] * (len(self.duals ) + len(other.duals ) + 1)
for i, item in enumerate(self.duals ):
for j, jtem in enumerate(other.duals ):
new_duals[i + j + 1] += item * jtem
for k in range(len(self.duals ) ):
new_duals[k] += self.duals[k] * other.real
for index in range(len(other.duals ) ):
new_duals[index] += other.duals[index] * self.real
return Dual(self.real * other.real,_A )
A = __mul__
def __truediv__( self : Optional[Any],_A : Dict ):
"""simple docstring"""
if not isinstance(_A,_A ):
SCREAMING_SNAKE_CASE_ : Dict = []
for i in self.duals:
new_duals.append(i / other )
return Dual(self.real / other,_A )
raise ValueError
def __floordiv__( self : List[str],_A : Any ):
"""simple docstring"""
if not isinstance(_A,_A ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = []
for i in self.duals:
new_duals.append(i // other )
return Dual(self.real // other,_A )
raise ValueError
def __pow__( self : Optional[Any],_A : Any ):
"""simple docstring"""
if n < 0 or isinstance(_A,_A ):
raise ValueError("power must be a positive integer" )
if n == 0:
return 1
if n == 1:
return self
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self
for _ in range(n - 1 ):
x *= self
return x
def _snake_case ( lowerCAmelCase : Tuple , lowerCAmelCase : List[str] , lowerCAmelCase : Any ):
"""simple docstring"""
if not callable(lowerCAmelCase ):
raise ValueError("differentiate() requires a function as input for func" )
if not isinstance(lowerCAmelCase , (float, int) ):
raise ValueError("differentiate() requires a float as input for position" )
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
raise ValueError("differentiate() requires an int as input for order" )
SCREAMING_SNAKE_CASE_ : Tuple = Dual(lowerCAmelCase , 1 )
SCREAMING_SNAKE_CASE_ : Dict = func(lowerCAmelCase )
if order == 0:
return result.real
return result.duals[order - 1] * factorial(lowerCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
def _snake_case ( lowerCAmelCase : Any ):
"""simple docstring"""
return y**2 * y**4
print(differentiate(f, 9, 2))
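# Worked check of the final call above (using the same `differentiate` name the
# print refers to): the function computes y**2 * y**4 = y**6, whose second
# derivative is 30 * y**4, i.e. 30 * 9**4 = 196830 at y = 9. The dual-number route
# agrees: the E**2 coefficient of (9 + E)**6 is C(6, 2) * 9**4 = 98415, and
# multiplying by 2! recovers 196830.
assert differentiate(lambda y: y**2 * y**4, 9, 2) == 196_830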
| 18 | import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__lowerCamelCase : List[str] = logging.get_logger(__name__)
__lowerCamelCase : Tuple = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
__lowerCamelCase : List[Any] = {
'''vocab_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
},
}
__lowerCamelCase : int = {
'''allenai/longformer-base-4096''': 40_96,
'''allenai/longformer-large-4096''': 40_96,
'''allenai/longformer-large-4096-finetuned-triviaqa''': 40_96,
'''allenai/longformer-base-4096-extra.pos.embd.only''': 40_96,
'''allenai/longformer-large-4096-extra.pos.embd.only''': 40_96,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def _snake_case ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
SCREAMING_SNAKE_CASE_ : str = bs[:]
SCREAMING_SNAKE_CASE_ : Optional[int] = 0
for b in range(2**8 ):
if b not in bs:
bs.append(lowerCAmelCase )
cs.append(2**8 + n )
n += 1
SCREAMING_SNAKE_CASE_ : List[str] = [chr(lowerCAmelCase ) for n in cs]
return dict(zip(lowerCAmelCase , lowerCAmelCase ) )
def _snake_case ( lowerCAmelCase : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = set()
SCREAMING_SNAKE_CASE_ : Tuple = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
SCREAMING_SNAKE_CASE_ : List[str] = char
return pairs
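# For instance (a minimal sketch of the pair-extraction helper above):
#
#     get_pairs(("h", "e", "l", "l", "o"))
#     # -> {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}
#
# These are the candidate merges that the BPE loop below repeatedly ranks and collapses.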
class a__ ( A__ ):
A = VOCAB_FILES_NAMES
A = PRETRAINED_VOCAB_FILES_MAP
A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A = ['input_ids', 'attention_mask']
def __init__( self : Union[str, Any],_A : List[Any],_A : Tuple,_A : str="replace",_A : Optional[int]="<s>",_A : Dict="</s>",_A : Any="</s>",_A : Optional[Any]="<s>",_A : Union[str, Any]="<unk>",_A : int="<pad>",_A : Dict="<mask>",_A : int=False,**_A : Dict,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else bos_token
SCREAMING_SNAKE_CASE_ : Optional[int] = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else eos_token
SCREAMING_SNAKE_CASE_ : str = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else sep_token
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else cls_token
SCREAMING_SNAKE_CASE_ : List[str] = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else unk_token
SCREAMING_SNAKE_CASE_ : Optional[Any] = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else pad_token
        # Mask token behaves like a normal word, i.e. it includes the space before it
SCREAMING_SNAKE_CASE_ : Dict = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else mask_token
super().__init__(
errors=_A,bos_token=_A,eos_token=_A,unk_token=_A,sep_token=_A,cls_token=_A,pad_token=_A,mask_token=_A,add_prefix_space=_A,**_A,)
with open(_A,encoding="utf-8" ) as vocab_handle:
SCREAMING_SNAKE_CASE_ : Tuple = json.load(_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE_ : Any = errors # how to handle errors in decoding
SCREAMING_SNAKE_CASE_ : Optional[Any] = bytes_to_unicode()
SCREAMING_SNAKE_CASE_ : str = {v: k for k, v in self.byte_encoder.items()}
with open(_A,encoding="utf-8" ) as merges_handle:
SCREAMING_SNAKE_CASE_ : int = merges_handle.read().split("\n" )[1:-1]
SCREAMING_SNAKE_CASE_ : List[str] = [tuple(merge.split() ) for merge in bpe_merges]
SCREAMING_SNAKE_CASE_ : Optional[int] = dict(zip(_A,range(len(_A ) ) ) )
SCREAMING_SNAKE_CASE_ : Any = {}
SCREAMING_SNAKE_CASE_ : List[str] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
SCREAMING_SNAKE_CASE_ : List[Any] = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
return len(self.encoder )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
return dict(self.encoder,**self.added_tokens_encoder )
def __UpperCamelCase ( self : Any,_A : int ):
"""simple docstring"""
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tuple(_A )
SCREAMING_SNAKE_CASE_ : str = get_pairs(_A )
if not pairs:
return token
while True:
SCREAMING_SNAKE_CASE_ : Tuple = min(_A,key=lambda _A : self.bpe_ranks.get(_A,float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = bigram
SCREAMING_SNAKE_CASE_ : int = []
SCREAMING_SNAKE_CASE_ : Dict = 0
while i < len(_A ):
try:
SCREAMING_SNAKE_CASE_ : Tuple = word.index(_A,_A )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
SCREAMING_SNAKE_CASE_ : str = j
if word[i] == first and i < len(_A ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
SCREAMING_SNAKE_CASE_ : Dict = tuple(_A )
SCREAMING_SNAKE_CASE_ : List[str] = new_word
if len(_A ) == 1:
break
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = get_pairs(_A )
SCREAMING_SNAKE_CASE_ : List[str] = " ".join(_A )
SCREAMING_SNAKE_CASE_ : Any = word
return word
def __UpperCamelCase ( self : Dict,_A : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = []
for token in re.findall(self.pat,_A ):
SCREAMING_SNAKE_CASE_ : Any = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_A ).split(" " ) )
return bpe_tokens
def __UpperCamelCase ( self : Optional[int],_A : str ):
"""simple docstring"""
return self.encoder.get(_A,self.encoder.get(self.unk_token ) )
def __UpperCamelCase ( self : Tuple,_A : str ):
"""simple docstring"""
return self.decoder.get(_A )
def __UpperCamelCase ( self : List[str],_A : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = "".join(_A )
SCREAMING_SNAKE_CASE_ : Tuple = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8",errors=self.errors )
return text
def __UpperCamelCase ( self : List[Any],_A : str,_A : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(_A ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
SCREAMING_SNAKE_CASE_ : Tuple = os.path.join(
_A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
SCREAMING_SNAKE_CASE_ : Any = os.path.join(
_A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(_A,"w",encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder,indent=2,sort_keys=_A,ensure_ascii=_A ) + "\n" )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0
with open(_A,"w",encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items(),key=lambda _A : kv[1] ):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
" Please check that the tokenizer is not corrupted!" )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = token_index
writer.write(" ".join(_A ) + "\n" )
index += 1
return vocab_file, merge_file
def __UpperCamelCase ( self : Optional[Any],_A : List[int],_A : Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : str = [self.cls_token_id]
SCREAMING_SNAKE_CASE_ : Tuple = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __UpperCamelCase ( self : str,_A : List[int],_A : Optional[List[int]] = None,_A : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A,token_ids_a=_A,already_has_special_tokens=_A )
if token_ids_a is None:
return [1] + ([0] * len(_A )) + [1]
return [1] + ([0] * len(_A )) + [1, 1] + ([0] * len(_A )) + [1]
def __UpperCamelCase ( self : Any,_A : List[int],_A : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCamelCase ( self : Any,_A : Union[str, Any],_A : Any=False,**_A : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = kwargs.pop("add_prefix_space",self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_A ) > 0 and not text[0].isspace()):
SCREAMING_SNAKE_CASE_ : str = " " + text
return (text, kwargs)
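    # The special-token layout produced by build_inputs_with_special_tokens above
    # follows the RoBERTa convention:
    #     single sequence:    <s> A </s>
    #     pair of sequences:  <s> A </s></s> B </s>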
| 18 | 1 |
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
__lowerCamelCase : Tuple = logging.getLogger(__name__)
class a__ ( A__ ):
A = 'summarization'
A = ['loss']
A = ROUGE_KEYS
A = 'rouge2'
def __init__( self : Dict,_A : Any,**_A : List[Any] ):
"""simple docstring"""
if hparams.sortish_sampler and hparams.gpus > 1:
SCREAMING_SNAKE_CASE_ : Tuple = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training" )
if hparams.sortish_sampler:
raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously" )
super().__init__(_A,num_labels=_A,mode=self.mode,**_A )
use_task_specific_params(self.model,"summarization" )
save_git_info(self.hparams.output_dir )
SCREAMING_SNAKE_CASE_ : Optional[int] = Path(self.output_dir ) / "metrics.json"
SCREAMING_SNAKE_CASE_ : Union[str, Any] = Path(self.output_dir ) / "hparams.pkl"
pickle_save(self.hparams,self.hparams_save_path )
SCREAMING_SNAKE_CASE_ : int = 0
SCREAMING_SNAKE_CASE_ : int = defaultdict(_A )
SCREAMING_SNAKE_CASE_ : List[str] = self.config.model_type
SCREAMING_SNAKE_CASE_ : int = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size
SCREAMING_SNAKE_CASE_ : dict = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
SCREAMING_SNAKE_CASE_ : int = {
"train": self.hparams.n_train,
"val": self.hparams.n_val,
"test": self.hparams.n_test,
}
SCREAMING_SNAKE_CASE_ : str = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
SCREAMING_SNAKE_CASE_ : str = {
"train": self.hparams.max_target_length,
"val": self.hparams.val_max_target_length,
"test": self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], F'target_lens: {self.target_lens}'
assert self.target_lens["train"] <= self.target_lens["test"], F'target_lens: {self.target_lens}'
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
SCREAMING_SNAKE_CASE_ : List[str] = get_git_info()["repo_sha"]
SCREAMING_SNAKE_CASE_ : List[Any] = hparams.num_workers
SCREAMING_SNAKE_CASE_ : str = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer,_A ):
SCREAMING_SNAKE_CASE_ : List[Any] = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
SCREAMING_SNAKE_CASE_ : List[str] = self.decoder_start_token_id
SCREAMING_SNAKE_CASE_ : Dict = (
SeqaSeqDataset if hasattr(self.tokenizer,"prepare_seq2seq_batch" ) else LegacySeqaSeqDataset
)
SCREAMING_SNAKE_CASE_ : str = False
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.hparams.eval_max_gen_length
else:
SCREAMING_SNAKE_CASE_ : Optional[int] = self.model.config.max_length
SCREAMING_SNAKE_CASE_ : Dict = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def __UpperCamelCase ( self : int,_A : Dict[str, torch.Tensor] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = {
k: self.tokenizer.batch_decode(v.tolist() ) if "mask" not in k else v.shape for k, v in batch.items()
}
save_json(_A,Path(self.output_dir ) / "text_batch.json" )
save_json({k: v.tolist() for k, v in batch.items()},Path(self.output_dir ) / "tok_batch.json" )
SCREAMING_SNAKE_CASE_ : str = True
return readable_batch
def __UpperCamelCase ( self : str,_A : List[str],**_A : List[Any] ):
"""simple docstring"""
return self.model(_A,**_A )
def __UpperCamelCase ( self : Any,_A : List[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = self.tokenizer.batch_decode(
_A,skip_special_tokens=_A,clean_up_tokenization_spaces=_A )
return lmap(str.strip,_A )
def __UpperCamelCase ( self : Union[str, Any],_A : dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = self.tokenizer.pad_token_id
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = batch["input_ids"], batch["attention_mask"]
SCREAMING_SNAKE_CASE_ : int = batch["labels"]
if isinstance(self.model,_A ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model._shift_right(_A )
else:
SCREAMING_SNAKE_CASE_ : str = shift_tokens_right(_A,_A )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
SCREAMING_SNAKE_CASE_ : str = decoder_input_ids
self.save_readable_batch(_A )
SCREAMING_SNAKE_CASE_ : List[Any] = self(_A,attention_mask=_A,decoder_input_ids=_A,use_cache=_A )
SCREAMING_SNAKE_CASE_ : Dict = outputs["logits"]
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
SCREAMING_SNAKE_CASE_ : str = nn.CrossEntropyLoss(ignore_index=_A )
assert lm_logits.shape[-1] == self.vocab_size
SCREAMING_SNAKE_CASE_ : Any = ce_loss_fct(lm_logits.view(-1,lm_logits.shape[-1] ),tgt_ids.view(-1 ) )
else:
SCREAMING_SNAKE_CASE_ : Tuple = nn.functional.log_softmax(_A,dim=-1 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = label_smoothed_nll_loss(
_A,_A,self.hparams.label_smoothing,ignore_index=_A )
return (loss,)
@property
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
return self.tokenizer.pad_token_id
def __UpperCamelCase ( self : List[Any],_A : Union[str, Any],_A : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = self._step(_A )
SCREAMING_SNAKE_CASE_ : Any = dict(zip(self.loss_names,_A ) )
# tokens per batch
SCREAMING_SNAKE_CASE_ : int = batch["input_ids"].ne(self.pad ).sum() + batch["labels"].ne(self.pad ).sum()
SCREAMING_SNAKE_CASE_ : Any = batch["input_ids"].shape[0]
SCREAMING_SNAKE_CASE_ : str = batch["input_ids"].eq(self.pad ).sum()
SCREAMING_SNAKE_CASE_ : Optional[int] = batch["input_ids"].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def __UpperCamelCase ( self : List[Any],_A : Any,_A : Optional[int] ):
"""simple docstring"""
return self._generative_step(_A )
def __UpperCamelCase ( self : List[str],_A : str,_A : Optional[Any]="val" ):
"""simple docstring"""
self.step_count += 1
SCREAMING_SNAKE_CASE_ : str = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
SCREAMING_SNAKE_CASE_ : List[str] = losses["loss"]
SCREAMING_SNAKE_CASE_ : Tuple = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ["gen_time", "gen_len"]
}
SCREAMING_SNAKE_CASE_ : Any = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
SCREAMING_SNAKE_CASE_ : torch.FloatTensor = torch.tensor(_A ).type_as(_A )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(_A )
SCREAMING_SNAKE_CASE_ : List[Any] = {F'{prefix}_avg_{k}': x for k, x in losses.items()}
SCREAMING_SNAKE_CASE_ : Any = self.step_count
self.metrics[prefix].append(_A ) # callback writes this to self.metrics_save_path
SCREAMING_SNAKE_CASE_ : Union[str, Any] = flatten_list([x["preds"] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
F'{prefix}_loss': loss,
F'{prefix}_{self.val_metric}': metric_tensor,
}
def __UpperCamelCase ( self : Dict,_A : Optional[Any],_A : Dict ):
"""simple docstring"""
return calculate_rouge(_A,_A )
def __UpperCamelCase ( self : int,_A : dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
SCREAMING_SNAKE_CASE_ : str = self.model.generate(
batch["input_ids"],attention_mask=batch["attention_mask"],use_cache=_A,decoder_start_token_id=self.decoder_start_token_id,num_beams=self.eval_beams,max_length=self.eval_max_length,)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (time.time() - ta) / batch["input_ids"].shape[0]
SCREAMING_SNAKE_CASE_ : List[str] = self.ids_to_clean_text(_A )
SCREAMING_SNAKE_CASE_ : List[str] = self.ids_to_clean_text(batch["labels"] )
SCREAMING_SNAKE_CASE_ : List[str] = self._step(_A )
SCREAMING_SNAKE_CASE_ : Dict = dict(zip(self.loss_names,_A ) )
SCREAMING_SNAKE_CASE_ : Dict = self.calc_generative_metrics(_A,_A )
SCREAMING_SNAKE_CASE_ : List[str] = np.mean(lmap(_A,_A ) )
base_metrics.update(gen_time=_A,gen_len=_A,preds=_A,target=_A,**_A )
return base_metrics
def __UpperCamelCase ( self : List[Any],_A : Dict,_A : Any ):
"""simple docstring"""
return self._generative_step(_A )
def __UpperCamelCase ( self : Optional[int],_A : List[Any] ):
"""simple docstring"""
return self.validation_epoch_end(_A,prefix="test" )
def __UpperCamelCase ( self : Optional[Any],_A : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.n_obs[type_path]
SCREAMING_SNAKE_CASE_ : Optional[int] = self.target_lens[type_path]
SCREAMING_SNAKE_CASE_ : List[str] = self.dataset_class(
self.tokenizer,type_path=_A,n_obs=_A,max_target_length=_A,**self.dataset_kwargs,)
return dataset
def __UpperCamelCase ( self : List[str],_A : str,_A : int,_A : bool = False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = self.get_dataset(_A )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
SCREAMING_SNAKE_CASE_ : List[Any] = dataset.make_sortish_sampler(_A,distributed=self.hparams.gpus > 1 )
return DataLoader(
_A,batch_size=_A,collate_fn=dataset.collate_fn,shuffle=_A,num_workers=self.num_workers,sampler=_A,)
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
SCREAMING_SNAKE_CASE_ : Optional[Any] = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch,distributed=self.hparams.gpus > 1 )
return DataLoader(
_A,batch_sampler=_A,collate_fn=dataset.collate_fn,num_workers=self.num_workers,)
else:
return DataLoader(
_A,batch_size=_A,collate_fn=dataset.collate_fn,shuffle=_A,num_workers=self.num_workers,sampler=_A,)
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_dataloader("train",batch_size=self.hparams.train_batch_size,shuffle=_A )
return dataloader
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
return self.get_dataloader("val",batch_size=self.hparams.eval_batch_size )
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
return self.get_dataloader("test",batch_size=self.hparams.eval_batch_size )
@staticmethod
def __UpperCamelCase ( _A : List[Any],_A : List[Any] ):
"""simple docstring"""
BaseTransformer.add_model_specific_args(_A,_A )
add_generic_args(_A,_A )
parser.add_argument(
"--max_source_length",default=1024,type=_A,help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
),)
        parser.add_argument(
            "--max_target_length",default=56,type=_A,help=(
                "The maximum total target sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),)
        parser.add_argument(
            "--val_max_target_length",default=142,type=_A,help=(
                "The maximum total target sequence length after tokenization for validation. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),)
        parser.add_argument(
            "--test_max_target_length",default=142,type=_A,help=(
                "The maximum total target sequence length after tokenization for the test set. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),)
parser.add_argument("--freeze_encoder",action="store_true" )
parser.add_argument("--freeze_embeds",action="store_true" )
parser.add_argument("--sortish_sampler",action="store_true",default=_A )
parser.add_argument("--overwrite_output_dir",action="store_true",default=_A )
parser.add_argument("--max_tokens_per_batch",type=_A,default=_A )
parser.add_argument("--logger_name",type=_A,choices=["default", "wandb", "wandb_shared"],default="default" )
parser.add_argument("--n_train",type=_A,default=-1,required=_A,help="# examples. -1 means use all." )
parser.add_argument("--n_val",type=_A,default=500,required=_A,help="# examples. -1 means use all." )
parser.add_argument("--n_test",type=_A,default=-1,required=_A,help="# examples. -1 means use all." )
        parser.add_argument(
            "--task",type=_A,default="summarization",required=_A,help="Task to run, e.g. summarization or translation." )
parser.add_argument("--label_smoothing",type=_A,default=0.0,required=_A )
parser.add_argument("--src_lang",type=_A,default="",required=_A )
parser.add_argument("--tgt_lang",type=_A,default="",required=_A )
parser.add_argument("--eval_beams",type=_A,default=_A,required=_A )
parser.add_argument(
"--val_metric",type=_A,default=_A,required=_A,choices=["bleu", "rouge2", "loss", None] )
parser.add_argument("--eval_max_gen_length",type=_A,default=_A,help="never generate more than n tokens" )
parser.add_argument("--save_top_k",type=_A,default=1,required=_A,help="How many checkpoints to save" )
parser.add_argument(
"--early_stopping_patience",type=_A,default=-1,required=_A,help=(
"-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
" val_check_interval will effect it."
),)
return parser
class a__ ( A__ ):
A = 'translation'
A = ['loss']
A = ['bleu']
A = 'bleu'
def __init__( self : str,_A : Optional[int],**_A : int ):
"""simple docstring"""
super().__init__(_A,**_A )
SCREAMING_SNAKE_CASE_ : List[Any] = hparams.src_lang
SCREAMING_SNAKE_CASE_ : Union[str, Any] = hparams.tgt_lang
def __UpperCamelCase ( self : Any,_A : Optional[Any],_A : str ):
"""simple docstring"""
return calculate_bleu(_A,_A )
def _snake_case ( lowerCAmelCase : Tuple , lowerCAmelCase : List[str]=None ):
"""simple docstring"""
Path(args.output_dir ).mkdir(exist_ok=lowerCAmelCase )
check_output_dir(lowerCAmelCase , expected_items=3 )
if model is None:
if "summarization" in args.task:
SCREAMING_SNAKE_CASE_ : SummarizationModule = SummarizationModule(lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE_ : SummarizationModule = TranslationModule(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith("/tmp" )
or str(args.output_dir ).startswith("/var" )
):
SCREAMING_SNAKE_CASE_ : Tuple = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
SCREAMING_SNAKE_CASE_ : Tuple = os.environ.get("WANDB_PROJECT" , lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] = WandbLogger(name=model.output_dir.name , project=lowerCAmelCase )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
SCREAMING_SNAKE_CASE_ : List[str] = WandbLogger(name=model.output_dir.name , project=f'hf_{dataset}' )
if args.early_stopping_patience >= 0:
SCREAMING_SNAKE_CASE_ : Dict = get_early_stopping_callback(model.val_metric , args.early_stopping_patience )
else:
SCREAMING_SNAKE_CASE_ : Dict = False
SCREAMING_SNAKE_CASE_ : Optional[int] = args.val_metric == "loss"
SCREAMING_SNAKE_CASE_ : pl.Trainer = generic_train(
lowerCAmelCase , lowerCAmelCase , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback(
args.output_dir , model.val_metric , args.save_top_k , lowerCAmelCase ) , early_stopping_callback=lowerCAmelCase , logger=lowerCAmelCase , )
pickle_save(model.hparams , model.output_dir / "hparams.pkl" )
if not args.do_predict:
return model
SCREAMING_SNAKE_CASE_ : Dict = ""
SCREAMING_SNAKE_CASE_ : Dict = sorted(glob.glob(os.path.join(args.output_dir , "*.ckpt" ) , recursive=lowerCAmelCase ) )
if checkpoints:
SCREAMING_SNAKE_CASE_ : List[str] = checkpoints[-1]
SCREAMING_SNAKE_CASE_ : List[Any] = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
__lowerCamelCase : Tuple = argparse.ArgumentParser()
__lowerCamelCase : Union[str, Any] = pl.Trainer.add_argparse_args(parser)
__lowerCamelCase : List[str] = SummarizationModule.add_model_specific_args(parser, os.getcwd())
__lowerCamelCase : Tuple = parser.parse_args()
main(args)
| 18 | from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class a__ :
def __init__( self : Optional[int],_A : Dict,_A : List[str]=13,_A : List[str]=7,_A : int=True,_A : str=True,_A : Union[str, Any]=True,_A : Tuple=True,_A : Dict=99,_A : Tuple=32,_A : Tuple=2,_A : Tuple=4,_A : Optional[Any]=37,_A : str="gelu",_A : Dict=0.1,_A : List[Any]=0.1,_A : List[str]=512,_A : str=16,_A : int=2,_A : Dict=0.02,_A : List[Any]=3,_A : Optional[Any]=4,_A : Optional[int]=None,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = parent
SCREAMING_SNAKE_CASE_ : Any = 13
SCREAMING_SNAKE_CASE_ : List[str] = 7
SCREAMING_SNAKE_CASE_ : Dict = True
SCREAMING_SNAKE_CASE_ : Optional[Any] = True
SCREAMING_SNAKE_CASE_ : Tuple = True
SCREAMING_SNAKE_CASE_ : List[str] = True
SCREAMING_SNAKE_CASE_ : List[str] = 99
SCREAMING_SNAKE_CASE_ : Tuple = 384
SCREAMING_SNAKE_CASE_ : Optional[Any] = 2
SCREAMING_SNAKE_CASE_ : Any = 4
SCREAMING_SNAKE_CASE_ : str = 37
SCREAMING_SNAKE_CASE_ : Optional[Any] = "gelu"
SCREAMING_SNAKE_CASE_ : List[Any] = 0.1
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0.1
SCREAMING_SNAKE_CASE_ : Dict = 512
SCREAMING_SNAKE_CASE_ : int = 16
SCREAMING_SNAKE_CASE_ : Optional[int] = 2
SCREAMING_SNAKE_CASE_ : Any = 0.02
SCREAMING_SNAKE_CASE_ : str = 3
SCREAMING_SNAKE_CASE_ : int = 4
SCREAMING_SNAKE_CASE_ : Dict = 128
SCREAMING_SNAKE_CASE_ : Any = 2
SCREAMING_SNAKE_CASE_ : Tuple = 9
SCREAMING_SNAKE_CASE_ : List[Any] = 1
SCREAMING_SNAKE_CASE_ : Any = None
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
SCREAMING_SNAKE_CASE_ : Any = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ : List[str] = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE_ : List[Any] = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
SCREAMING_SNAKE_CASE_ : Dict = None
SCREAMING_SNAKE_CASE_ : Dict = None
SCREAMING_SNAKE_CASE_ : str = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size],self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([self.batch_size, self.seq_length],self.num_labels )
SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size],self.num_choices )
SCREAMING_SNAKE_CASE_ : Any = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=_A,
        )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase ( self : Optional[int],_A : List[Any],_A : int,_A : Tuple,_A : Optional[int],_A : Union[str, Any],_A : Union[str, Any],_A : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = TFConvBertModel(config=_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
SCREAMING_SNAKE_CASE_ : str = [input_ids, input_mask]
SCREAMING_SNAKE_CASE_ : List[str] = model(_A )
SCREAMING_SNAKE_CASE_ : Dict = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self : Dict,_A : Dict,_A : int,_A : Union[str, Any],_A : List[Any],_A : int,_A : str,_A : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = TFConvBertForMaskedLM(config=_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE_ : List[Any] = model(_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self : Any,_A : Optional[int],_A : List[Any],_A : Union[str, Any],_A : List[Any],_A : Union[str, Any],_A : Optional[int],_A : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self.num_labels
SCREAMING_SNAKE_CASE_ : Any = TFConvBertForSequenceClassification(config=_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : int,_A : int,_A : Dict,_A : List[str],_A : Tuple,_A : Dict,_A : Optional[int],_A : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.num_choices
SCREAMING_SNAKE_CASE_ : Optional[int] = TFConvBertForMultipleChoice(config=_A )
SCREAMING_SNAKE_CASE_ : Any = tf.tile(tf.expand_dims(_A,1 ),(1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE_ : Any = tf.tile(tf.expand_dims(_A,1 ),(1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.tile(tf.expand_dims(_A,1 ),(1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE_ : int = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
SCREAMING_SNAKE_CASE_ : int = model(_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_choices) )
def __UpperCamelCase ( self : List[Any],_A : Union[str, Any],_A : int,_A : Optional[int],_A : str,_A : str,_A : Tuple,_A : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.num_labels
SCREAMING_SNAKE_CASE_ : Union[str, Any] = TFConvBertForTokenClassification(config=_A )
SCREAMING_SNAKE_CASE_ : Tuple = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE_ : str = model(_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase ( self : List[Any],_A : int,_A : List[str],_A : List[Any],_A : Any,_A : Optional[int],_A : List[str],_A : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = TFConvBertForQuestionAnswering(config=_A )
SCREAMING_SNAKE_CASE_ : Dict = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE_ : Any = model(_A )
self.parent.assertEqual(result.start_logits.shape,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape,(self.batch_size, self.seq_length) )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class a__ ( A__ , A__ , unittest.TestCase ):
A = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
A = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
A = False
A = False
A = False
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = TFConvBertModelTester(self )
SCREAMING_SNAKE_CASE_ : Tuple = ConfigTester(self,config_class=_A,hidden_size=37 )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_A )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_A )
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_A )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_A )
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_A )
@slow
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : List[str] = True
SCREAMING_SNAKE_CASE_ : Any = True
if hasattr(_A,"use_cache" ):
SCREAMING_SNAKE_CASE_ : List[Any] = True
SCREAMING_SNAKE_CASE_ : int = getattr(self.model_tester,"encoder_seq_length",self.model_tester.seq_length )
SCREAMING_SNAKE_CASE_ : Optional[Any] = getattr(self.model_tester,"key_length",_A )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : List[str] = self._prepare_for_class(_A,_A )
SCREAMING_SNAKE_CASE_ : List[Any] = model_class(_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = len(model(_A ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_A,saved_model=_A )
SCREAMING_SNAKE_CASE_ : Optional[Any] = os.path.join(_A,"saved_model","1" )
SCREAMING_SNAKE_CASE_ : Tuple = tf.keras.models.load_model(_A )
SCREAMING_SNAKE_CASE_ : str = model(_A )
if self.is_encoder_decoder:
SCREAMING_SNAKE_CASE_ : Optional[Any] = outputs["encoder_hidden_states"]
SCREAMING_SNAKE_CASE_ : str = outputs["encoder_attentions"]
else:
SCREAMING_SNAKE_CASE_ : Any = outputs["hidden_states"]
SCREAMING_SNAKE_CASE_ : List[str] = outputs["attentions"]
self.assertEqual(len(_A ),_A )
SCREAMING_SNAKE_CASE_ : Any = getattr(
self.model_tester,"expected_num_hidden_layers",self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_A ),_A )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ),[self.model_tester.seq_length, self.model_tester.hidden_size],)
self.assertEqual(len(_A ),self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],)
@slow
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
self.assertIsNotNone(_A )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : List[str] = True
SCREAMING_SNAKE_CASE_ : List[str] = getattr(self.model_tester,"decoder_seq_length",self.model_tester.seq_length )
SCREAMING_SNAKE_CASE_ : Any = getattr(self.model_tester,"encoder_seq_length",self.model_tester.seq_length )
SCREAMING_SNAKE_CASE_ : Optional[int] = getattr(self.model_tester,"key_length",_A )
SCREAMING_SNAKE_CASE_ : int = getattr(self.model_tester,"key_length",_A )
def check_decoder_attentions_output(_A : Dict ):
SCREAMING_SNAKE_CASE_ : int = len(_A )
self.assertEqual(out_len % 2,0 )
SCREAMING_SNAKE_CASE_ : Tuple = outputs.decoder_attentions
self.assertEqual(len(_A ),self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],)
def check_encoder_attentions_output(_A : Tuple ):
SCREAMING_SNAKE_CASE_ : int = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(_A ),self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],)
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : Optional[Any] = True
SCREAMING_SNAKE_CASE_ : Optional[Any] = False
SCREAMING_SNAKE_CASE_ : Tuple = model_class(_A )
SCREAMING_SNAKE_CASE_ : Any = model(self._prepare_for_class(_A,_A ) )
SCREAMING_SNAKE_CASE_ : Tuple = len(_A )
self.assertEqual(config.output_hidden_states,_A )
check_encoder_attentions_output(_A )
if self.is_encoder_decoder:
SCREAMING_SNAKE_CASE_ : Optional[Any] = model_class(_A )
SCREAMING_SNAKE_CASE_ : int = model(self._prepare_for_class(_A,_A ) )
self.assertEqual(config.output_hidden_states,_A )
check_decoder_attentions_output(_A )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE_ : str = True
SCREAMING_SNAKE_CASE_ : int = model_class(_A )
SCREAMING_SNAKE_CASE_ : List[str] = model(self._prepare_for_class(_A,_A ) )
self.assertEqual(config.output_hidden_states,_A )
check_encoder_attentions_output(_A )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE_ : str = True
SCREAMING_SNAKE_CASE_ : int = True
SCREAMING_SNAKE_CASE_ : Dict = model_class(_A )
SCREAMING_SNAKE_CASE_ : str = model(self._prepare_for_class(_A,_A ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1),len(_A ) )
self.assertEqual(model.config.output_hidden_states,_A )
check_encoder_attentions_output(_A )
@require_tf
class a__ ( unittest.TestCase ):
@slow
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
SCREAMING_SNAKE_CASE_ : int = tf.constant([[0, 1, 2, 3, 4, 5]] )
SCREAMING_SNAKE_CASE_ : Tuple = model(_A )[0]
SCREAMING_SNAKE_CASE_ : List[Any] = [1, 6, 768]
self.assertEqual(output.shape,_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.constant(
[
[
[-0.03475493, -0.4686034, -0.30638832],
[0.22637248, -0.26988646, -0.7423424],
[0.10324868, -0.45013508, -0.58280784],
]
] )
tf.debugging.assert_near(output[:, :3, :3],_A,atol=1E-4 )
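# The integration check above compares against reference activations of the
# released checkpoint; being @slow, it only runs when slow tests are enabled,
# e.g. (test-file path assumed from the usual transformers layout):
#   RUN_SLOW=1 python -m pytest tests/models/convbert/test_modeling_tf_convbert.py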
| 18 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
__lowerCamelCase : Tuple = {'''vocab_file''': '''spm_char.model'''}
__lowerCamelCase : Dict = {
'''vocab_file''': {
'''microsoft/speecht5_asr''': '''https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model''',
'''microsoft/speecht5_tts''': '''https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model''',
'''microsoft/speecht5_vc''': '''https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model''',
}
}
__lowerCamelCase : List[str] = {
'''microsoft/speecht5_asr''': 10_24,
'''microsoft/speecht5_tts''': 10_24,
'''microsoft/speecht5_vc''': 10_24,
}
class a__ ( A__ ):
A = VOCAB_FILES_NAMES
A = PRETRAINED_VOCAB_FILES_MAP
A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A = ['input_ids', 'attention_mask']
def __init__( self : int,_A : Tuple,_A : str="<s>",_A : int="</s>",_A : int="<unk>",_A : Tuple="<pad>",_A : Optional[Dict[str, Any]] = None,**_A : List[Any],):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_A,eos_token=_A,unk_token=_A,pad_token=_A,sp_model_kwargs=self.sp_model_kwargs,**_A,)
SCREAMING_SNAKE_CASE_ : List[str] = vocab_file
SCREAMING_SNAKE_CASE_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_A )
@property
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
return self.sp_model.get_piece_size()
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = {self.convert_ids_to_tokens(_A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.__dict__.copy()
SCREAMING_SNAKE_CASE_ : str = None
return state
def __setstate__( self : Dict,_A : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = d
# for backward compatibility
if not hasattr(self,"sp_model_kwargs" ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = {}
SCREAMING_SNAKE_CASE_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __UpperCamelCase ( self : List[str],_A : str ):
"""simple docstring"""
return self.sp_model.encode(_A,out_type=_A )
def __UpperCamelCase ( self : str,_A : int ):
"""simple docstring"""
return self.sp_model.piece_to_id(_A )
def __UpperCamelCase ( self : str,_A : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = self.sp_model.IdToPiece(_A )
return token
def __UpperCamelCase ( self : Any,_A : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = []
SCREAMING_SNAKE_CASE_ : Optional[Any] = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(_A ) + token
SCREAMING_SNAKE_CASE_ : Union[str, Any] = []
else:
current_sub_tokens.append(_A )
out_string += self.sp_model.decode(_A )
return out_string.strip()
def __UpperCamelCase ( self : str,_A : List[Any],_A : Any=None ):
"""simple docstring"""
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def __UpperCamelCase ( self : str,_A : List[int],_A : Optional[List[int]] = None,_A : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A,token_ids_a=_A,already_has_special_tokens=_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [1]
if token_ids_a is None:
return ([0] * len(_A )) + suffix_ones
return ([0] * len(_A )) + ([0] * len(_A )) + suffix_ones
def __UpperCamelCase ( self : str,_A : str,_A : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(_A ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
SCREAMING_SNAKE_CASE_ : Union[str, Any] = os.path.join(
_A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file,_A )
elif not os.path.isfile(self.vocab_file ):
with open(_A,"wb" ) as fi:
SCREAMING_SNAKE_CASE_ : Any = self.sp_model.serialized_model_proto()
fi.write(_A )
return (out_vocab_file,)
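# Minimal usage sketch (hub id taken from the pretrained map above; the class
# name is the obfuscated a__ defined here, the char-level SpeechT5 tokenizer):
#   tok = a__.from_pretrained("microsoft/speecht5_asr")
#   ids = tok("hello world")["input_ids"]   # per-character pieces + eos id
#   text = tok.decode(ids)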
| 18 | def binary_recursive(decimal: int) -> str:
    """simple docstring"""
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)
def main(number: str) -> str:
    """simple docstring"""
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f'{negative}0b{binary_recursive(int(number))}'
if __name__ == "__main__":
from doctest import testmod
testmod()
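# Sanity examples for the two helpers above:
#   binary_recursive(10)  -> "1010"
#   main("-11")           -> "-0b1011"
#   main("")              -> raises ValueError("No input value was provided")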
| 18 | 1 |
from math import factorial, radians
def sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    """simple docstring"""
    # Simplify the angle to be between 360 and -360 degrees.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result, rounded_values_count)
if __name__ == "__main__":
__import__('''doctest''').testmod()
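# Sanity checks for the Taylor-series sine above (rounded to the default 10
# places): sin(90.0) -> 1.0 and sin(30.0) -> 0.5, and sin(x + 360) matches
# sin(x) because of the range reduction on the first line of the function.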
| 18 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__lowerCamelCase : Union[str, Any] = {
'''configuration_chinese_clip''': [
'''CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ChineseCLIPConfig''',
'''ChineseCLIPOnnxConfig''',
'''ChineseCLIPTextConfig''',
'''ChineseCLIPVisionConfig''',
],
'''processing_chinese_clip''': ['''ChineseCLIPProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Any = ['''ChineseCLIPFeatureExtractor''']
__lowerCamelCase : Optional[int] = ['''ChineseCLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : int = [
'''CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ChineseCLIPModel''',
'''ChineseCLIPPreTrainedModel''',
'''ChineseCLIPTextModel''',
'''ChineseCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
__lowerCamelCase : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 18 | 1 |
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def _snake_case ( ):
"""simple docstring"""
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
SCREAMING_SNAKE_CASE_ : str = "__test_patch_submodule_mock__"
with patch_submodule(_test_patching , "os.path.join" , lowerCAmelCase ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def _snake_case ( ):
"""simple docstring"""
assert _test_patching.open is open
SCREAMING_SNAKE_CASE_ : Any = "__test_patch_submodule_builtin_mock__"
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching , "open" , lowerCAmelCase ):
assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
assert _test_patching.open is open
def _snake_case ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = "__test_patch_submodule_missing_mock__"
with patch_submodule(_test_patching , "pandas.read_csv" , lowerCAmelCase ):
pass
def _snake_case ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = "__test_patch_submodule_missing_builtin_mock__"
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching , "len" , lowerCAmelCase ) is None
with patch_submodule(_test_patching , "len" , lowerCAmelCase ):
assert _test_patching.len is mock
assert _test_patching.len is len
def _snake_case ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = "__test_patch_submodule_start_and_stop_mock__"
SCREAMING_SNAKE_CASE_ : Union[str, Any] = patch_submodule(_test_patching , "open" , lowerCAmelCase )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def _snake_case ( ):
"""simple docstring"""
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
SCREAMING_SNAKE_CASE_ : Tuple = "__test_patch_submodule_successive_join__"
SCREAMING_SNAKE_CASE_ : List[Any] = "__test_patch_submodule_successive_dirname__"
SCREAMING_SNAKE_CASE_ : List[str] = "__test_patch_submodule_successive_rename__"
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching , "os.path.join" , lowerCAmelCase ):
with patch_submodule(_test_patching , "os.rename" , lowerCAmelCase ):
with patch_submodule(_test_patching , "os.path.dirname" , lowerCAmelCase ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching , "os.rename" , lowerCAmelCase ):
with patch_submodule(_test_patching , "os.path.join" , lowerCAmelCase ):
with patch_submodule(_test_patching , "os.path.dirname" , lowerCAmelCase ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def _snake_case ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = "__test_patch_submodule_doesnt_exist_mock__"
with patch_submodule(_test_patching , "__module_that_doesn_exist__.__attribute_that_doesn_exist__" , lowerCAmelCase ):
pass
with patch_submodule(_test_patching , "os.__attribute_that_doesn_exist__" , lowerCAmelCase ):
pass
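# Taken together, the tests above pin down patch_submodule's contract: dotted
# targets ("os.path.join"), module-global builtins ("open"), and renamed
# aliases are all swapped for the mock inside the context (or between
# .start() and .stop()), attributes that don't exist are tolerated, and every
# binding is restored once the patch ends.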
| 18 | import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
__lowerCamelCase : Any = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def _snake_case ( lowerCAmelCase : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = list(s_dict.keys() )
for key in keys:
SCREAMING_SNAKE_CASE_ : int = R".*/layers_(\d+)"
SCREAMING_SNAKE_CASE_ : List[Any] = key
if re.match(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Any = re.sub(R"layers_(\d+)" , R"block/\1/layer" , lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[Any] = R"(encoder|decoder)\/"
if re.match(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : str = re.match(lowerCAmelCase , lowerCAmelCase ).groups()
if groups[0] == "encoder":
SCREAMING_SNAKE_CASE_ : Any = re.sub(R"/mlp/" , R"/1/mlp/" , lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[Any] = re.sub(R"/pre_mlp_layer_norm/" , R"/1/layer_norm/" , lowerCAmelCase )
elif groups[0] == "decoder":
SCREAMING_SNAKE_CASE_ : List[str] = re.sub(R"/mlp/" , R"/2/mlp/" , lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Dict = re.sub(R"/pre_mlp_layer_norm/" , R"/2/layer_norm/" , lowerCAmelCase )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
SCREAMING_SNAKE_CASE_ : List[Any] = new_key.replace(lowerCAmelCase , lowerCAmelCase )
print(f'{key} -> {new_key}' )
SCREAMING_SNAKE_CASE_ : List[Any] = s_dict.pop(lowerCAmelCase )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
SCREAMING_SNAKE_CASE_ : str = s_dict[
"encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
SCREAMING_SNAKE_CASE_ : Optional[int] = s_dict[
"decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                new_key = key.replace("expert/", f"experts/expert_{idx}/")
                s_dict[new_key] = expert_weights[idx]
                print(f"{key} -> {new_key}")
            s_dict.pop(key)
return s_dict
__lowerCamelCase : List[Any] = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def _snake_case ( lowerCAmelCase : Dict , lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
import regex as re
with open(lowerCAmelCase , "r" ) as f:
SCREAMING_SNAKE_CASE_ : Optional[Any] = f.read()
SCREAMING_SNAKE_CASE_ : List[str] = re.findall(R"(.*) = ([0-9.]*)" , lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Any = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
SCREAMING_SNAKE_CASE_ : int = float(lowerCAmelCase ) if "." in value else int(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = re.findall(R"(.*activations) = \(\'(.*)\',\)" , lowerCAmelCase )[0]
SCREAMING_SNAKE_CASE_ : List[str] = str(activation[1] )
SCREAMING_SNAKE_CASE_ : str = num_experts
SCREAMING_SNAKE_CASE_ : Tuple = SwitchTransformersConfig(**lowerCAmelCase )
return config
def _snake_case ( lowerCAmelCase : str , lowerCAmelCase : str , lowerCAmelCase : str=None , lowerCAmelCase : Optional[Any]="./" , lowerCAmelCase : Dict=8 ):
"""simple docstring"""
print(f'Loading flax weights from : {flax_checkpoint_path}' )
SCREAMING_SNAKE_CASE_ : int = checkpoints.load_tax_checkpoint(lowerCAmelCase )
if gin_file is not None:
SCREAMING_SNAKE_CASE_ : int = convert_gin_to_config(lowerCAmelCase , lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE_ : Dict = SwitchTransformersConfig.from_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : str = SwitchTransformersForConditionalGeneration(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = flax_params["target"]
SCREAMING_SNAKE_CASE_ : List[str] = flatten_dict(lowerCAmelCase , sep="/" )
SCREAMING_SNAKE_CASE_ : List[str] = rename_keys(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = unflatten_dict(lowerCAmelCase , sep="/" )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(lowerCAmelCase , lowerCAmelCase )
print(f'Save PyTorch model to {pytorch_dump_path}' )
pt_model.save_pretrained(lowerCAmelCase )
if __name__ == "__main__":
__lowerCamelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'''
''' model architecture. If not provided, a `gin_file` has to be provided.'''
),
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
__lowerCamelCase : Any = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
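# Illustrative command line (checkpoint and output paths are placeholders;
# the script file name is assumed):
#   python convert_switch_transformers_checkpoint.py \
#       --switch_t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --gin_file /path/to/operative_config.gin \
#       --pytorch_dump_folder_path ./switch-base-8 \
#       --num_experts 8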
| 18 | 1 |
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
__lowerCamelCase : Any = logging.get_logger(__name__)
def _snake_case ( lowerCAmelCase : bool , lowerCAmelCase : bool ):
"""simple docstring"""
def run_func(lowerCAmelCase : int ):
@wraps(lowerCAmelCase )
def run_in_eager_mode(*lowerCAmelCase : Tuple , **lowerCAmelCase : Any ):
return func(*lowerCAmelCase , **lowerCAmelCase )
@wraps(lowerCAmelCase )
@tf.function(experimental_compile=lowerCAmelCase )
def run_in_graph_mode(*lowerCAmelCase : Union[str, Any] , **lowerCAmelCase : List[str] ):
return func(*lowerCAmelCase , **lowerCAmelCase )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
"Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`." )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
def _snake_case ( lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = random.Random()
SCREAMING_SNAKE_CASE_ : str = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(lowerCAmelCase , shape=(batch_size, sequence_length) , dtype=tf.intaa )
class a__ ( A__ ):
A = 42
A = 42
A = "TensorFlow"
@property
def __UpperCamelCase ( self : str ):
"""simple docstring"""
return tf.__version__
def __UpperCamelCase ( self : int,_A : str,_A : int,_A : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
SCREAMING_SNAKE_CASE_ : Any = self._prepare_inference_func(_A,_A,_A )
return self._measure_speed(_inference )
def __UpperCamelCase ( self : str,_A : str,_A : int,_A : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
SCREAMING_SNAKE_CASE_ : List[Any] = self._prepare_train_func(_A,_A,_A )
return self._measure_speed(_train )
def __UpperCamelCase ( self : Dict,_A : str,_A : int,_A : int ):
"""simple docstring"""
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx],_A )
SCREAMING_SNAKE_CASE_ : Tuple = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
SCREAMING_SNAKE_CASE_ : str = self._prepare_inference_func(_A,_A,_A )
return self._measure_memory(_inference )
def __UpperCamelCase ( self : Optional[Any],_A : str,_A : int,_A : int ):
"""simple docstring"""
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx],_A )
SCREAMING_SNAKE_CASE_ : Any = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
SCREAMING_SNAKE_CASE_ : str = self._prepare_train_func(_A,_A,_A )
return self._measure_memory(_train )
def __UpperCamelCase ( self : int,_A : str,_A : int,_A : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError("Mixed precision is currently not supported." )
SCREAMING_SNAKE_CASE_ : Dict = (
hasattr(_A,"architectures" )
and isinstance(config.architectures,_A )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
SCREAMING_SNAKE_CASE_ : List[Any] = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
SCREAMING_SNAKE_CASE_ : List[Any] = __import__("transformers",fromlist=[model_class] )
SCREAMING_SNAKE_CASE_ : Tuple = getattr(_A,_A )
SCREAMING_SNAKE_CASE_ : List[str] = model_cls(_A )
except ImportError:
raise ImportError(
F'{model_class} does not exist. If you just want to test the pretrained model, you might want to'
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
SCREAMING_SNAKE_CASE_ : Optional[int] = TF_MODEL_MAPPING[config.__class__](_A )
# encoder-decoder has vocab size saved differently
SCREAMING_SNAKE_CASE_ : Tuple = config.vocab_size if hasattr(_A,"vocab_size" ) else config.encoder.vocab_size
SCREAMING_SNAKE_CASE_ : str = random_input_ids(_A,_A,_A )
@run_with_tf_optimizations(self.args.eager_mode,self.args.use_xla )
def encoder_decoder_forward():
return model(_A,decoder_input_ids=_A,training=_A )
@run_with_tf_optimizations(self.args.eager_mode,self.args.use_xla )
def encoder_forward():
return model(_A,training=_A )
SCREAMING_SNAKE_CASE_ : Tuple = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def __UpperCamelCase ( self : Dict,_A : str,_A : int,_A : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`." )
if self.args.fpaa:
raise NotImplementedError("Mixed precision is currently not supported." )
SCREAMING_SNAKE_CASE_ : Dict = (
hasattr(_A,"architectures" )
and isinstance(config.architectures,_A )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
SCREAMING_SNAKE_CASE_ : Dict = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
SCREAMING_SNAKE_CASE_ : Tuple = __import__("transformers",fromlist=[model_class] )
SCREAMING_SNAKE_CASE_ : Optional[int] = getattr(_A,_A )
SCREAMING_SNAKE_CASE_ : List[Any] = model_cls(_A )
except ImportError:
raise ImportError(
F'{model_class} does not exist. If you just want to test the pretrained model, you might want to'
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
SCREAMING_SNAKE_CASE_ : Any = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](_A )
# encoder-decoder has vocab size saved differently
SCREAMING_SNAKE_CASE_ : Any = config.vocab_size if hasattr(_A,"vocab_size" ) else config.encoder.vocab_size
SCREAMING_SNAKE_CASE_ : int = random_input_ids(_A,_A,_A )
@run_with_tf_optimizations(self.args.eager_mode,self.args.use_xla )
def encoder_decoder_train():
SCREAMING_SNAKE_CASE_ : Dict = model(_A,decoder_input_ids=_A,labels=_A,training=_A )[0]
SCREAMING_SNAKE_CASE_ : Optional[Any] = tf.gradients(_A,model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode,self.args.use_xla )
def encoder_train():
SCREAMING_SNAKE_CASE_ : Tuple = model(_A,labels=_A,training=_A )[0]
SCREAMING_SNAKE_CASE_ : Tuple = tf.gradients(_A,model.trainable_variables )
return gradients
SCREAMING_SNAKE_CASE_ : List[Any] = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def __UpperCamelCase ( self : int,_A : Any ):
"""simple docstring"""
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
# run additional 10 times to stabilize compilation for tpu
logger.info("Do inference on TPU. Running model 5 times to stabilize compilation" )
timeit.repeat(_A,repeat=1,number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
SCREAMING_SNAKE_CASE_ : Optional[int] = timeit.repeat(
_A,repeat=self.args.repeat,number=10,)
return min(_A ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(F'Doesn\'t fit on GPU. {e}' )
def __UpperCamelCase ( self : List[str],_A : Callable[[], None] ):
"""simple docstring"""
logger.info(
"Note that TensorFlow allocates more memory than "
"it might need to speed up computation. "
"The memory reported here corresponds to the memory "
"reported by `nvidia-smi`, which can vary depending "
"on total available memory on the GPU that is used." )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
" consumption line by line." )
SCREAMING_SNAKE_CASE_ : List[str] = start_memory_tracing("transformers" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
" with `args.memory=False`" )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
"py3nvml not installed, we won't log GPU memory usage. "
"Install py3nvml (pip install py3nvml) to log information about GPU." )
SCREAMING_SNAKE_CASE_ : Optional[int] = "N/A"
else:
logger.info(
"Measuring total GPU usage on GPU device. Make sure to not have additional processes"
" running on the same GPU." )
# init nvml
nvml.nvmlInit()
func()
SCREAMING_SNAKE_CASE_ : str = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = nvml.nvmlDeviceGetMemoryInfo(_A )
SCREAMING_SNAKE_CASE_ : Any = meminfo.used
SCREAMING_SNAKE_CASE_ : List[Any] = Memory(_A )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
" TensorFlow." )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = None
else:
SCREAMING_SNAKE_CASE_ : Optional[Any] = measure_peak_memory_cpu(_A )
SCREAMING_SNAKE_CASE_ : str = Memory(_A ) if isinstance(_A,_A ) else memory_bytes
if self.args.trace_memory_line_by_line:
SCREAMING_SNAKE_CASE_ : Optional[int] = stop_memory_tracing(_A )
if memory is None:
SCREAMING_SNAKE_CASE_ : Optional[Any] = summary.total
else:
SCREAMING_SNAKE_CASE_ : Optional[int] = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(F'Doesn\'t fit on GPU. {e}' )
return "N/A", None
| 18 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
__lowerCamelCase : Tuple = None
__lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
__lowerCamelCase : str = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
__lowerCamelCase : Optional[int] = {
'''vocab_file''': {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
),
},
'''tokenizer_file''': {
'''google/bigbird-roberta-base''': (
'''https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'''
),
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'''
),
},
}
__lowerCamelCase : List[str] = {
'''google/bigbird-roberta-base''': 40_96,
'''google/bigbird-roberta-large''': 40_96,
'''google/bigbird-base-trivia-itc''': 40_96,
}
__lowerCamelCase : Optional[Any] = '''▁'''
class a__ ( A__ ):
A = VOCAB_FILES_NAMES
A = PRETRAINED_VOCAB_FILES_MAP
A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A = BigBirdTokenizer
A = ['input_ids', 'attention_mask']
A = []
def __init__( self : Union[str, Any],_A : Any=None,_A : Any=None,_A : str="<unk>",_A : str="<s>",_A : int="</s>",_A : Union[str, Any]="<pad>",_A : Dict="[SEP]",_A : int="[MASK]",_A : int="[CLS]",**_A : Any,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else bos_token
SCREAMING_SNAKE_CASE_ : int = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else eos_token
SCREAMING_SNAKE_CASE_ : Dict = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else unk_token
SCREAMING_SNAKE_CASE_ : str = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else pad_token
SCREAMING_SNAKE_CASE_ : int = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else cls_token
SCREAMING_SNAKE_CASE_ : List[str] = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE_ : List[Any] = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else mask_token
super().__init__(
            _A,
            tokenizer_file=_A,
            bos_token=_A,
            eos_token=_A,
            unk_token=_A,
            sep_token=_A,
            pad_token=_A,
            cls_token=_A,
            mask_token=_A,
            **_A,
        )
SCREAMING_SNAKE_CASE_ : Optional[int] = vocab_file
SCREAMING_SNAKE_CASE_ : Union[str, Any] = False if not self.vocab_file else True
def __UpperCamelCase ( self : Union[str, Any],_A : List[int],_A : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __UpperCamelCase ( self : Union[str, Any],_A : List[int],_A : Optional[List[int]] = None,_A : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(_A )) + [1]
return [1] + ([0] * len(_A )) + [1] + ([0] * len(_A )) + [1]
def __UpperCamelCase ( self : List[Any],_A : List[int],_A : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCamelCase ( self : str,_A : str,_A : Optional[str] = None ):
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(_A ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
SCREAMING_SNAKE_CASE_ : List[str] = os.path.join(
_A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ):
copyfile(self.vocab_file,_A )
return (out_vocab_file,)
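# The special-token layout implemented above is BERT-like:
#   single sequence: [CLS] A [SEP]
#   pair:            [CLS] A [SEP] B [SEP]
# with token_type_ids of all zeros for the first segment and all ones for
# the second, matching create_token_type_ids_from_sequences above.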
| 18 | from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
"""simple docstring"""
if num < 0:
raise ValueError("Number should not be negative." )
return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
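# Examples: factorial(5) == 120 and factorial(0) == 1; negative input raises
# ValueError. Thanks to @lru_cache, repeated calls reuse memoized results
# instead of recursing again.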
| 18 | 1 |
| 18 | def solution(numerator: int = 3, denominator: int = 7, limit: int = 1_000_000) -> int:
    """simple docstring"""
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_00_00_00))
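# This is Project Euler 71: find the numerator of the reduced proper fraction
# immediately to the left of 3/7 with denominators up to 1,000,000. With the
# defaults above, the expected output is 428570 (the fraction 428570/999997).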
| 18 | from collections import defaultdict
def dfs(start: int) -> int:
    """simple docstring"""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret
def even_tree():
    """simple docstring"""
    dfs(1)
if __name__ == "__main__":
__lowerCamelCase , __lowerCamelCase : Union[str, Any] = 10, 9
__lowerCamelCase : Optional[int] = defaultdict(list)
__lowerCamelCase : dict[int, bool] = {}
__lowerCamelCase : list[int] = []
__lowerCamelCase : Optional[Any] = 0
__lowerCamelCase : Any = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
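# How this works: dfs() returns each subtree's size and records a node in
# `cuts` whenever its subtree has even size. The root of an even-sized tree
# is always recorded too, so len(cuts) - 1 is the number of removable edges;
# for the sample graph above the printed answer is 2.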
| 18 | 1 |
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class a__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
SCREAMING_SNAKE_CASE_ : Optional[Any] = FlaxDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-pipe",safety_checker=_A,cache_dir=_A )
SCREAMING_SNAKE_CASE_ : int = [t[-1] for t in os.walk(os.path.join(_A,os.listdir(_A )[0],"snapshots" ) )]
SCREAMING_SNAKE_CASE_ : Any = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith(".bin" ) for f in files )
@slow
@require_flax
class a__ ( unittest.TestCase ):
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = FlaxStableDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-pipe",safety_checker=_A )
SCREAMING_SNAKE_CASE_ : List[str] = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE_ : Tuple = 4
SCREAMING_SNAKE_CASE_ : int = jax.device_count()
SCREAMING_SNAKE_CASE_ : List[Any] = num_samples * [prompt]
SCREAMING_SNAKE_CASE_ : int = pipeline.prepare_inputs(_A )
# shard inputs and rng
SCREAMING_SNAKE_CASE_ : Any = replicate(_A )
SCREAMING_SNAKE_CASE_ : List[Any] = jax.random.split(_A,_A )
SCREAMING_SNAKE_CASE_ : List[Any] = shard(_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = pipeline(_A,_A,_A,_A,jit=_A ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:],dtype=np.floataa ).sum() - 4.1514745 ) < 1E-3
assert np.abs(np.abs(_A,dtype=np.floataa ).sum() - 49947.875 ) < 5E-1
SCREAMING_SNAKE_CASE_ : List[Any] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(_A ) == num_samples
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4",revision="flax",safety_checker=_A )
SCREAMING_SNAKE_CASE_ : Dict = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE_ : Optional[int] = 50
SCREAMING_SNAKE_CASE_ : int = jax.device_count()
SCREAMING_SNAKE_CASE_ : str = num_samples * [prompt]
SCREAMING_SNAKE_CASE_ : str = pipeline.prepare_inputs(_A )
# shard inputs and rng
SCREAMING_SNAKE_CASE_ : Union[str, Any] = replicate(_A )
SCREAMING_SNAKE_CASE_ : Optional[Any] = jax.random.split(_A,_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = shard(_A )
SCREAMING_SNAKE_CASE_ : List[str] = pipeline(_A,_A,_A,_A,jit=_A ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:],dtype=np.floataa ).sum() - 0.05652401) ) < 1E-3
assert np.abs((np.abs(_A,dtype=np.floataa ).sum() - 2383808.2) ) < 5E-1
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4",revision="bf16",dtype=jnp.bfloataa,safety_checker=_A )
SCREAMING_SNAKE_CASE_ : str = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
SCREAMING_SNAKE_CASE_ : Dict = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 50
SCREAMING_SNAKE_CASE_ : List[str] = jax.device_count()
SCREAMING_SNAKE_CASE_ : Optional[int] = num_samples * [prompt]
SCREAMING_SNAKE_CASE_ : Optional[Any] = pipeline.prepare_inputs(_A )
# shard inputs and rng
SCREAMING_SNAKE_CASE_ : int = replicate(_A )
SCREAMING_SNAKE_CASE_ : Optional[Any] = jax.random.split(_A,_A )
SCREAMING_SNAKE_CASE_ : str = shard(_A )
SCREAMING_SNAKE_CASE_ : Tuple = pipeline(_A,_A,_A,_A,jit=_A ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:],dtype=np.floataa ).sum() - 0.04003906) ) < 1E-3
assert np.abs((np.abs(_A,dtype=np.floataa ).sum() - 2373516.75) ) < 5E-1
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4",revision="bf16",dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE_ : Any = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
SCREAMING_SNAKE_CASE_ : Optional[Any] = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE_ : Tuple = 50
SCREAMING_SNAKE_CASE_ : int = jax.device_count()
SCREAMING_SNAKE_CASE_ : List[Any] = num_samples * [prompt]
SCREAMING_SNAKE_CASE_ : Optional[int] = pipeline.prepare_inputs(_A )
# shard inputs and rng
SCREAMING_SNAKE_CASE_ : Dict = replicate(_A )
SCREAMING_SNAKE_CASE_ : Dict = jax.random.split(_A,_A )
SCREAMING_SNAKE_CASE_ : Tuple = shard(_A )
SCREAMING_SNAKE_CASE_ : List[str] = pipeline(_A,_A,_A,_A,jit=_A ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:],dtype=np.floataa ).sum() - 0.04003906) ) < 1E-3
assert np.abs((np.abs(_A,dtype=np.floataa ).sum() - 2373516.75) ) < 5E-1
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = FlaxDDIMScheduler(
beta_start=0.00085,beta_end=0.012,beta_schedule="scaled_linear",set_alpha_to_one=_A,steps_offset=1,)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4",revision="bf16",dtype=jnp.bfloataa,scheduler=_A,safety_checker=_A,)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = scheduler.create_state()
SCREAMING_SNAKE_CASE_ : Optional[int] = scheduler_state
SCREAMING_SNAKE_CASE_ : str = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
SCREAMING_SNAKE_CASE_ : Optional[Any] = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE_ : Optional[int] = 50
SCREAMING_SNAKE_CASE_ : List[str] = jax.device_count()
SCREAMING_SNAKE_CASE_ : Dict = num_samples * [prompt]
SCREAMING_SNAKE_CASE_ : List[str] = pipeline.prepare_inputs(_A )
# shard inputs and rng
SCREAMING_SNAKE_CASE_ : Union[str, Any] = replicate(_A )
SCREAMING_SNAKE_CASE_ : List[Any] = jax.random.split(_A,_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = shard(_A )
SCREAMING_SNAKE_CASE_ : str = pipeline(_A,_A,_A,_A,jit=_A ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:],dtype=np.floataa ).sum() - 0.045043945) ) < 1E-3
assert np.abs((np.abs(_A,dtype=np.floataa ).sum() - 2347693.5) ) < 5E-1
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = jax.device_count()
SCREAMING_SNAKE_CASE_ : Any = num_samples * [prompt]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = jax.random.split(jax.random.PRNGKey(0 ),_A )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4",revision="bf16",dtype=jnp.bfloataa,safety_checker=_A,)
SCREAMING_SNAKE_CASE_ : str = replicate(_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = pipeline.prepare_inputs(_A )
SCREAMING_SNAKE_CASE_ : List[Any] = shard(_A )
SCREAMING_SNAKE_CASE_ : List[str] = pipeline(_A,_A,_A,jit=_A ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
SCREAMING_SNAKE_CASE_ : List[Any] = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4",revision="bf16",dtype=jnp.bfloataa,safety_checker=_A,use_memory_efficient_attention=_A,)
SCREAMING_SNAKE_CASE_ : Optional[int] = replicate(_A )
SCREAMING_SNAKE_CASE_ : Any = pipeline.prepare_inputs(_A )
SCREAMING_SNAKE_CASE_ : int = shard(_A )
SCREAMING_SNAKE_CASE_ : List[Any] = pipeline(_A,_A,_A,jit=_A ).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        SCREAMING_SNAKE_CASE_ : Optional[Any] = images_eff[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1E-2
| 18 | # Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
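# Entry point for the `accelerate` CLI: each sub-command (config, env, launch,
# tpu, test) registers its own subparser and stores its handler on `args.func`.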
def _snake_case ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = ArgumentParser("Accelerate CLI tool" , usage="accelerate <command> [<args>]" , allow_abbrev=lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = parser.add_subparsers(help="accelerate command helpers" )
# Register commands
get_config_parser(subparsers=lowerCAmelCase )
env_command_parser(subparsers=lowerCAmelCase )
launch_command_parser(subparsers=lowerCAmelCase )
tpu_command_parser(subparsers=lowerCAmelCase )
test_command_parser(subparsers=lowerCAmelCase )
# Let's go
SCREAMING_SNAKE_CASE_ : Dict = parser.parse_args()
if not hasattr(lowerCAmelCase , "func" ):
parser.print_help()
exit(1 )
# Run
args.func(lowerCAmelCase )
if __name__ == "__main__":
main()
| 18 | 1 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
__lowerCamelCase : Any = {
'''return_dict''': False,
'''output_hidden_states''': True,
'''output_attentions''': True,
'''torchscript''': True,
'''torch_dtype''': '''float16''',
'''use_bfloat16''': True,
'''tf_legacy_loss''': True,
'''pruned_heads''': {'''a''': 1},
'''tie_word_embeddings''': False,
'''is_decoder''': True,
'''cross_attention_hidden_size''': 1_28,
'''add_cross_attention''': True,
'''tie_encoder_decoder''': True,
'''max_length''': 50,
'''min_length''': 3,
'''do_sample''': True,
'''early_stopping''': True,
'''num_beams''': 3,
'''num_beam_groups''': 3,
'''diversity_penalty''': 0.5,
'''temperature''': 2.0,
'''top_k''': 10,
'''top_p''': 0.7,
'''typical_p''': 0.2,
'''repetition_penalty''': 0.8,
'''length_penalty''': 0.8,
'''no_repeat_ngram_size''': 5,
'''encoder_no_repeat_ngram_size''': 5,
'''bad_words_ids''': [1, 2, 3],
'''num_return_sequences''': 3,
'''chunk_size_feed_forward''': 5,
'''output_scores''': True,
'''return_dict_in_generate''': True,
'''forced_bos_token_id''': 2,
'''forced_eos_token_id''': 3,
'''remove_invalid_values''': True,
'''architectures''': ['''BertModel'''],
'''finetuning_task''': '''translation''',
'''id2label''': {0: '''label'''},
'''label2id''': {'''label''': '''0'''},
'''tokenizer_class''': '''BertTokenizerFast''',
'''prefix''': '''prefix''',
'''bos_token_id''': 6,
'''pad_token_id''': 7,
'''eos_token_id''': 8,
'''sep_token_id''': 9,
'''decoder_start_token_id''': 10,
'''exponential_decay_length_penalty''': (5, 1.01),
'''suppress_tokens''': [0, 1],
'''begin_suppress_tokens''': 2,
'''task_specific_params''': {'''translation''': '''some_params'''},
'''problem_type''': '''regression''',
}
@is_staging_test
class a__ ( unittest.TestCase ):
@classmethod
def __UpperCamelCase ( cls : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = TOKEN
HfFolder.save_token(_A )
@classmethod
def __UpperCamelCase ( cls : str ):
"""simple docstring"""
try:
delete_repo(token=cls._token,repo_id="test-config" )
except HTTPError:
pass
try:
delete_repo(token=cls._token,repo_id="valid_org/test-config-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token,repo_id="test-dynamic-config" )
except HTTPError:
pass
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = BertConfig(
vocab_size=99,hidden_size=32,num_hidden_layers=5,num_attention_heads=4,intermediate_size=37 )
config.push_to_hub("test-config",use_auth_token=self._token )
SCREAMING_SNAKE_CASE_ : int = BertConfig.from_pretrained(F'{USER}/test-config' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_A,getattr(_A,_A ) )
# Reset repo
delete_repo(token=self._token,repo_id="test-config" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_A,repo_id="test-config",push_to_hub=_A,use_auth_token=self._token )
SCREAMING_SNAKE_CASE_ : Optional[int] = BertConfig.from_pretrained(F'{USER}/test-config' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_A,getattr(_A,_A ) )
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = BertConfig(
vocab_size=99,hidden_size=32,num_hidden_layers=5,num_attention_heads=4,intermediate_size=37 )
config.push_to_hub("valid_org/test-config-org",use_auth_token=self._token )
SCREAMING_SNAKE_CASE_ : List[Any] = BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_A,getattr(_A,_A ) )
# Reset repo
delete_repo(token=self._token,repo_id="valid_org/test-config-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
_A,repo_id="valid_org/test-config-org",push_to_hub=_A,use_auth_token=self._token )
SCREAMING_SNAKE_CASE_ : int = BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_A,getattr(_A,_A ) )
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
CustomConfig.register_for_auto_class()
SCREAMING_SNAKE_CASE_ : Tuple = CustomConfig(attribute=42 )
config.push_to_hub("test-dynamic-config",use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map,{"AutoConfig": "custom_configuration.CustomConfig"} )
SCREAMING_SNAKE_CASE_ : List[Any] = AutoConfig.from_pretrained(F'{USER}/test-dynamic-config',trust_remote_code=_A )
        # Can't make an isinstance check because the new_config is from the CustomConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__,"CustomConfig" )
self.assertEqual(new_config.attribute,42 )
class a__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
SCREAMING_SNAKE_CASE_ : Tuple = c.n_embd + 1 # int
SCREAMING_SNAKE_CASE_ : Any = c.resid_pdrop + 1.0 # float
SCREAMING_SNAKE_CASE_ : List[str] = not c.scale_attn_weights # bool
SCREAMING_SNAKE_CASE_ : Any = c.summary_type + "foo" # str
c.update_from_string(
F'n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}' )
self.assertEqual(_A,c.n_embd,"mismatch for key: n_embd" )
self.assertEqual(_A,c.resid_pdrop,"mismatch for key: resid_pdrop" )
self.assertEqual(_A,c.scale_attn_weights,"mismatch for key: scale_attn_weights" )
self.assertEqual(_A,c.summary_type,"mismatch for key: summary_type" )
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = PretrainedConfig()
SCREAMING_SNAKE_CASE_ : List[str] = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
_A,["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"] )
SCREAMING_SNAKE_CASE_ : Optional[Any] = [key for key, value in config_common_kwargs.items() if value == getattr(_A,_A )]
if len(_A ) > 0:
raise ValueError(
"The following keys are set with the default values in"
" `test_configuration_common.config_common_kwargs` pick another value for them:"
F' {", ".join(_A )}.' )
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
with self.assertRaises(_A ):
# config is in subfolder, the following should not work without specifying the subfolder
SCREAMING_SNAKE_CASE_ : Any = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" )
SCREAMING_SNAKE_CASE_ : Dict = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder",subfolder="bert" )
self.assertIsNotNone(_A )
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = mock.Mock()
SCREAMING_SNAKE_CASE_ : List[Any] = 500
SCREAMING_SNAKE_CASE_ : Optional[Any] = {}
SCREAMING_SNAKE_CASE_ : List[Any] = HTTPError
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {}
# Download this model to make sure it's in the cache.
SCREAMING_SNAKE_CASE_ : str = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request",return_value=_A ) as mock_head:
SCREAMING_SNAKE_CASE_ : Dict = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
            # This checks that we did call the fake head request
mock_head.assert_called()
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = BertConfig.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json" )
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = AutoConfig.from_pretrained("bert-base-cased" )
SCREAMING_SNAKE_CASE_ : int = ["config.4.0.0.json"]
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(_A )
SCREAMING_SNAKE_CASE_ : str = 2
json.dump(configuration.to_dict(),open(os.path.join(_A,"config.4.0.0.json" ),"w" ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
SCREAMING_SNAKE_CASE_ : List[Any] = AutoConfig.from_pretrained(_A )
self.assertEqual(new_configuration.hidden_size,2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
SCREAMING_SNAKE_CASE_ : str = ["config.42.0.0.json"]
SCREAMING_SNAKE_CASE_ : Optional[int] = 768
configuration.save_pretrained(_A )
shutil.move(os.path.join(_A,"config.4.0.0.json" ),os.path.join(_A,"config.42.0.0.json" ) )
SCREAMING_SNAKE_CASE_ : Optional[int] = AutoConfig.from_pretrained(_A )
self.assertEqual(new_configuration.hidden_size,768 )
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = "hf-internal-testing/test-two-configs"
import transformers as new_transformers
SCREAMING_SNAKE_CASE_ : Dict = "v4.0.0"
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = new_transformers.models.auto.AutoConfig.from_pretrained(
_A,return_unused_kwargs=_A )
self.assertEqual(new_configuration.hidden_size,2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(_A,{} )
        # Testing an older version by monkey-patching the version in the module where it's used.
import transformers as old_transformers
SCREAMING_SNAKE_CASE_ : Optional[int] = "v3.0.0"
SCREAMING_SNAKE_CASE_ : Tuple = old_transformers.models.auto.AutoConfig.from_pretrained(_A )
self.assertEqual(old_configuration.hidden_size,768 )
| 18 | import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
__lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
__lowerCamelCase : Dict = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__lowerCamelCase : int = {
'''vocab_file''': {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'''
),
}
}
__lowerCamelCase : Any = {
'''junnyu/roformer_chinese_small''': 15_36,
'''junnyu/roformer_chinese_base''': 15_36,
'''junnyu/roformer_chinese_char_small''': 5_12,
'''junnyu/roformer_chinese_char_base''': 5_12,
'''junnyu/roformer_small_discriminator''': 1_28,
'''junnyu/roformer_small_generator''': 1_28,
}
__lowerCamelCase : Union[str, Any] = {
'''junnyu/roformer_chinese_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_base''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_base''': {'''do_lower_case''': True},
'''junnyu/roformer_small_discriminator''': {'''do_lower_case''': True},
'''junnyu/roformer_small_generator''': {'''do_lower_case''': True},
}
class a__ ( A__ ):
A = VOCAB_FILES_NAMES
A = PRETRAINED_VOCAB_FILES_MAP
A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A = PRETRAINED_INIT_CONFIGURATION
A = RoFormerTokenizer
def __init__( self : List[str],_A : int=None,_A : int=None,_A : int=True,_A : List[Any]="[UNK]",_A : Tuple="[SEP]",_A : List[Any]="[PAD]",_A : Optional[int]="[CLS]",_A : Optional[Any]="[MASK]",_A : Optional[int]=True,_A : List[str]=None,**_A : List[Any],):
"""simple docstring"""
super().__init__(
_A,tokenizer_file=_A,do_lower_case=_A,unk_token=_A,sep_token=_A,pad_token=_A,cls_token=_A,mask_token=_A,tokenize_chinese_chars=_A,strip_accents=_A,**_A,)
SCREAMING_SNAKE_CASE_ : Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
pre_tok_state.get("lowercase",_A ) != do_lower_case
or pre_tok_state.get("strip_accents",_A ) != strip_accents
):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = getattr(_A,pre_tok_state.pop("type" ) )
SCREAMING_SNAKE_CASE_ : Any = do_lower_case
SCREAMING_SNAKE_CASE_ : List[str] = strip_accents
SCREAMING_SNAKE_CASE_ : str = pre_tok_class(**_A )
SCREAMING_SNAKE_CASE_ : List[str] = do_lower_case
def __getstate__( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = self.__dict__.copy()
SCREAMING_SNAKE_CASE_ : Optional[Any] = BertPreTokenizer()
return state
def __setstate__( self : List[Any],_A : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = d
SCREAMING_SNAKE_CASE_ : List[str] = self.__dict__["_tokenizer"].get_vocab()
SCREAMING_SNAKE_CASE_ : Any = PreTokenizer.custom(JiebaPreTokenizer(_A ) )
def __UpperCamelCase ( self : Union[str, Any],_A : List[Any],_A : str=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __UpperCamelCase ( self : str,_A : List[int],_A : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCamelCase ( self : int,_A : str,_A : Optional[str] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self._tokenizer.model.save(_A,name=_A )
return tuple(_A )
def __UpperCamelCase ( self : int,_A : Optional[int],_A : List[Any]=None,_A : Tuple=None,_A : str=False,**_A : List[Any],):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = BertPreTokenizer()
return super().save_pretrained(_A,_A,_A,_A,**_A )
| 18 | 1 |
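# Project Euler 12 - Highly divisible triangular number. The divisor count of
# n = p1^a1 * p2^a2 * ... is (a1 + 1) * (a2 + 1) * ..., which `count_divisors` uses.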
def count_divisors(n: int) -> int:
    """Count the divisors of n from its prime factorisation."""
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors


def solution() -> int:
    """Return the first triangle number with more than 500 divisors."""
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num


if __name__ == "__main__":
    print(solution())
| 18 | import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class a__ ( A__ ):
def __init__( self : Tuple,_A : Optional[int],_A : Any=13,_A : List[str]=7,_A : int=True,_A : Dict=True,_A : Dict=False,_A : List[Any]=True,_A : Any=99,_A : Optional[int]=32,_A : Any=5,_A : List[Any]=4,_A : Dict=64,_A : Optional[Any]="gelu",_A : Tuple=0.1,_A : Any=0.1,_A : List[Any]=512,_A : Dict=16,_A : Optional[Any]=2,_A : Union[str, Any]=0.02,_A : List[str]=3,_A : Optional[Any]=4,_A : Union[str, Any]=None,_A : Tuple=2,_A : List[str]=2,_A : str=2,_A : Dict=2,_A : Optional[Any]=4,_A : Union[str, Any]=1,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = parent
SCREAMING_SNAKE_CASE_ : Optional[int] = batch_size
SCREAMING_SNAKE_CASE_ : Dict = seq_length
SCREAMING_SNAKE_CASE_ : Dict = is_training
SCREAMING_SNAKE_CASE_ : Optional[int] = use_input_mask
SCREAMING_SNAKE_CASE_ : int = use_token_type_ids
SCREAMING_SNAKE_CASE_ : Optional[int] = use_labels
SCREAMING_SNAKE_CASE_ : Tuple = vocab_size
SCREAMING_SNAKE_CASE_ : Any = hidden_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE_ : List[Any] = intermediate_size
SCREAMING_SNAKE_CASE_ : List[str] = hidden_act
SCREAMING_SNAKE_CASE_ : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : Optional[int] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : str = max_position_embeddings
SCREAMING_SNAKE_CASE_ : str = type_vocab_size
SCREAMING_SNAKE_CASE_ : List[str] = type_sequence_label_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE_ : Tuple = num_labels
SCREAMING_SNAKE_CASE_ : List[Any] = num_choices
SCREAMING_SNAKE_CASE_ : Dict = scope
SCREAMING_SNAKE_CASE_ : int = q_groups
SCREAMING_SNAKE_CASE_ : Tuple = k_groups
SCREAMING_SNAKE_CASE_ : List[Any] = v_groups
SCREAMING_SNAKE_CASE_ : Tuple = post_attention_groups
SCREAMING_SNAKE_CASE_ : int = intermediate_groups
SCREAMING_SNAKE_CASE_ : List[Any] = output_groups
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
SCREAMING_SNAKE_CASE_ : List[Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ : Optional[Any] = None
SCREAMING_SNAKE_CASE_ : Any = None
SCREAMING_SNAKE_CASE_ : str = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : str = ids_tensor([self.batch_size],self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length],self.num_labels )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([self.batch_size],self.num_choices )
SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase ( self : str ):
"""simple docstring"""
return SqueezeBertConfig(
embedding_size=self.hidden_size,vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,attention_probs_dropout_prob=self.hidden_dropout_prob,attention_dropout=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,initializer_range=self.initializer_range,q_groups=self.q_groups,k_groups=self.k_groups,v_groups=self.v_groups,post_attention_groups=self.post_attention_groups,intermediate_groups=self.intermediate_groups,output_groups=self.output_groups,)
def __UpperCamelCase ( self : Tuple,_A : Union[str, Any],_A : Union[str, Any],_A : int,_A : Optional[int],_A : Union[str, Any],_A : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = SqueezeBertModel(config=_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : Any = model(_A,_A )
SCREAMING_SNAKE_CASE_ : List[str] = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self : Dict,_A : Any,_A : Tuple,_A : str,_A : Any,_A : Union[str, Any],_A : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = SqueezeBertForMaskedLM(config=_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : List[str] = model(_A,attention_mask=_A,labels=_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self : Optional[int],_A : Union[str, Any],_A : Union[str, Any],_A : Any,_A : Tuple,_A : int,_A : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = SqueezeBertForQuestionAnswering(config=_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = model(
_A,attention_mask=_A,start_positions=_A,end_positions=_A )
self.parent.assertEqual(result.start_logits.shape,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape,(self.batch_size, self.seq_length) )
def __UpperCamelCase ( self : List[Any],_A : List[str],_A : Tuple,_A : List[Any],_A : List[str],_A : List[str],_A : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.num_labels
SCREAMING_SNAKE_CASE_ : List[str] = SqueezeBertForSequenceClassification(_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = model(_A,attention_mask=_A,labels=_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : str,_A : Optional[int],_A : str,_A : List[Any],_A : List[str],_A : str,_A : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.num_labels
SCREAMING_SNAKE_CASE_ : Optional[int] = SqueezeBertForTokenClassification(config=_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : Optional[int] = model(_A,attention_mask=_A,labels=_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase ( self : List[Any],_A : Tuple,_A : str,_A : Optional[Any],_A : int,_A : str,_A : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self.num_choices
SCREAMING_SNAKE_CASE_ : Union[str, Any] = SqueezeBertForMultipleChoice(config=_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = input_ids.unsqueeze(1 ).expand(-1,self.num_choices,-1 ).contiguous()
SCREAMING_SNAKE_CASE_ : str = input_mask.unsqueeze(1 ).expand(-1,self.num_choices,-1 ).contiguous()
SCREAMING_SNAKE_CASE_ : Optional[int] = model(
_A,attention_mask=_A,labels=_A,)
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_choices) )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.prepare_config_and_inputs()
((SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_)) : Dict = config_and_inputs
SCREAMING_SNAKE_CASE_ : Dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class a__ ( A__ , A__ , unittest.TestCase ):
A = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
A = (
{
'feature-extraction': SqueezeBertModel,
'fill-mask': SqueezeBertForMaskedLM,
'question-answering': SqueezeBertForQuestionAnswering,
'text-classification': SqueezeBertForSequenceClassification,
'token-classification': SqueezeBertForTokenClassification,
'zero-shot': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
A = False
A = True
A = False
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = SqueezeBertModelTester(self )
SCREAMING_SNAKE_CASE_ : List[str] = ConfigTester(self,config_class=_A,dim=37 )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*_A )
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*_A )
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*_A )
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*_A )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*_A )
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*_A )
@slow
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ : Tuple = SqueezeBertModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@require_sentencepiece
@require_tokenizers
@require_torch
class a__ ( unittest.TestCase ):
@slow
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli" )
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.tensor([[1, 2_9414, 232, 328, 740, 1140, 1_2695, 69, 13, 1588, 2]] )
SCREAMING_SNAKE_CASE_ : List[Any] = model(_A )[0]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.Size((1, 3) )
self.assertEqual(output.shape,_A )
SCREAMING_SNAKE_CASE_ : int = torch.tensor([[0.6401, -0.0349, -0.6041]] )
self.assertTrue(torch.allclose(_A,_A,atol=1E-4 ) )
| 18 | 1 |
import fire
from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer
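# Builds a model with randomly initialized weights from a pretrained config
# (no checkpoint download) and saves it together with the matching tokenizer.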
def _snake_case ( lowerCAmelCase : str , lowerCAmelCase : str , **lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = AutoConfig.from_pretrained(lowerCAmelCase , **lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[Any] = AutoModelForSeqaSeqLM.from_config(lowerCAmelCase )
model.save_pretrained(lowerCAmelCase )
AutoTokenizer.from_pretrained(lowerCAmelCase ).save_pretrained(lowerCAmelCase )
return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
| 18 | import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _snake_case ( lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
assert isinstance(lowerCAmelCase , lowerCAmelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def _snake_case ( lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE_ : List[str] = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase , keep_in_memory=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def _snake_case ( lowerCAmelCase : Tuple , lowerCAmelCase : Dict , lowerCAmelCase : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Optional[Any] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : Any = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE_ : List[str] = (
Features({feature: Value(lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE_ : Optional[Any] = TextDatasetReader(lowerCAmelCase , features=lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def _snake_case ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : List[str] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : List[str] = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase , split=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
    assert dataset.split == (split if split else "train")
@pytest.mark.parametrize("path_type" , [str, list] )
def _snake_case ( lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : Optional[int] ):
"""simple docstring"""
if issubclass(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = text_path
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [text_path]
SCREAMING_SNAKE_CASE_ : int = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Optional[int] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : List[str] = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
def _snake_case ( lowerCAmelCase : Dict , lowerCAmelCase : Dict , lowerCAmelCase : List[str]=("train",) ):
"""simple docstring"""
assert isinstance(lowerCAmelCase , lowerCAmelCase )
for split in splits:
SCREAMING_SNAKE_CASE_ : int = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def _snake_case ( lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE_ : List[Any] = TextDatasetReader({"train": text_path} , cache_dir=lowerCAmelCase , keep_in_memory=lowerCAmelCase ).read()
_check_text_datasetdict(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def _snake_case ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = tmp_path / "cache"
    # text files are read into a single "text" column with dtype "string" by default
SCREAMING_SNAKE_CASE_ : Tuple = {"text": "string"}
SCREAMING_SNAKE_CASE_ : Any = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE_ : Dict = (
Features({feature: Value(lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE_ : str = TextDatasetReader({"train": text_path} , features=lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_datasetdict(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def _snake_case ( lowerCAmelCase : Optional[Any] , lowerCAmelCase : Any , lowerCAmelCase : Dict ):
"""simple docstring"""
if split:
SCREAMING_SNAKE_CASE_ : Optional[int] = {split: text_path}
else:
SCREAMING_SNAKE_CASE_ : List[Any] = "train"
SCREAMING_SNAKE_CASE_ : Tuple = {"train": text_path, "test": text_path}
SCREAMING_SNAKE_CASE_ : Any = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : List[str] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : str = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_datasetdict(lowerCAmelCase , lowerCAmelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
| 18 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
__lowerCamelCase : Tuple = logging.get_logger(__name__)
__lowerCamelCase : Any = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__lowerCamelCase : List[str] = {
'''vocab_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-openqa''': (
            '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
            '''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
            '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-openqa''': (
'''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-reader''': (
'''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-openqa''': (
'''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-reader''': (
'''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'''
),
},
}
__lowerCamelCase : Union[str, Any] = {
'''google/realm-cc-news-pretrained-embedder''': 5_12,
'''google/realm-cc-news-pretrained-encoder''': 5_12,
'''google/realm-cc-news-pretrained-scorer''': 5_12,
'''google/realm-cc-news-pretrained-openqa''': 5_12,
'''google/realm-orqa-nq-openqa''': 5_12,
'''google/realm-orqa-nq-reader''': 5_12,
'''google/realm-orqa-wq-openqa''': 5_12,
'''google/realm-orqa-wq-reader''': 5_12,
}
__lowerCamelCase : Optional[Any] = {
'''google/realm-cc-news-pretrained-embedder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-encoder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-scorer''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-reader''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-reader''': {'''do_lower_case''': True},
}
class a__ ( A__ ):
A = VOCAB_FILES_NAMES
A = PRETRAINED_VOCAB_FILES_MAP
A = PRETRAINED_INIT_CONFIGURATION
A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A = RealmTokenizer
def __init__( self : Any,_A : Tuple=None,_A : Union[str, Any]=None,_A : str=True,_A : Tuple="[UNK]",_A : List[str]="[SEP]",_A : List[str]="[PAD]",_A : int="[CLS]",_A : Dict="[MASK]",_A : str=True,_A : int=None,**_A : List[str],):
"""simple docstring"""
super().__init__(
_A,tokenizer_file=_A,do_lower_case=_A,unk_token=_A,sep_token=_A,pad_token=_A,cls_token=_A,mask_token=_A,tokenize_chinese_chars=_A,strip_accents=_A,**_A,)
SCREAMING_SNAKE_CASE_ : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase",_A ) != do_lower_case
or normalizer_state.get("strip_accents",_A ) != strip_accents
or normalizer_state.get("handle_chinese_chars",_A ) != tokenize_chinese_chars
):
SCREAMING_SNAKE_CASE_ : Optional[Any] = getattr(_A,normalizer_state.pop("type" ) )
SCREAMING_SNAKE_CASE_ : int = do_lower_case
SCREAMING_SNAKE_CASE_ : Union[str, Any] = strip_accents
SCREAMING_SNAKE_CASE_ : int = tokenize_chinese_chars
SCREAMING_SNAKE_CASE_ : str = normalizer_class(**_A )
SCREAMING_SNAKE_CASE_ : Tuple = do_lower_case
def __UpperCamelCase ( self : Tuple,_A : List[Any],**_A : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = PaddingStrategy.MAX_LENGTH
SCREAMING_SNAKE_CASE_ : Dict = text
SCREAMING_SNAKE_CASE_ : List[str] = kwargs.pop("text_pair",_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = kwargs.pop("return_tensors",_A )
SCREAMING_SNAKE_CASE_ : int = {
"input_ids": [],
"attention_mask": [],
"token_type_ids": [],
}
for idx, candidate_text in enumerate(_A ):
if batch_text_pair is not None:
SCREAMING_SNAKE_CASE_ : Optional[Any] = batch_text_pair[idx]
else:
SCREAMING_SNAKE_CASE_ : Optional[int] = None
SCREAMING_SNAKE_CASE_ : Optional[int] = super().__call__(_A,_A,return_tensors=_A,**_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = encoded_candidates.get("input_ids" )
SCREAMING_SNAKE_CASE_ : Optional[Any] = encoded_candidates.get("attention_mask" )
SCREAMING_SNAKE_CASE_ : Optional[int] = encoded_candidates.get("token_type_ids" )
if encoded_input_ids is not None:
output_data["input_ids"].append(_A )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(_A )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(_A )
SCREAMING_SNAKE_CASE_ : Any = {key: item for key, item in output_data.items() if len(_A ) != 0}
return BatchEncoding(_A,tensor_type=_A )
def __UpperCamelCase ( self : Dict,_A : Optional[Any],_A : Union[str, Any]=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __UpperCamelCase ( self : Optional[int],_A : List[int],_A : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCamelCase ( self : Any,_A : str,_A : Optional[str] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = self._tokenizer.model.save(_A,name=_A )
return tuple(_A )
| 18 | import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
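# Helper: returns True only when every tensor in the list has the same shape.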
def _snake_case ( lowerCAmelCase : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = [tensor.shape for tensor in tensor_list]
return all(shape == shapes[0] for shape in shapes[1:] )
class a__ ( A__ , A__ , A__ , unittest.TestCase ):
A = StableDiffusionLatentUpscalePipeline
A = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'height',
'width',
'cross_attention_kwargs',
'negative_prompt_embeds',
'prompt_embeds',
}
A = PipelineTesterMixin.required_optional_params - {'num_images_per_prompt'}
A = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
A = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
A = frozenset([] )
A = True
@property
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = 1
SCREAMING_SNAKE_CASE_ : Optional[int] = 4
SCREAMING_SNAKE_CASE_ : Optional[int] = (16, 16)
SCREAMING_SNAKE_CASE_ : Dict = floats_tensor((batch_size, num_channels) + sizes,rng=random.Random(0 ) ).to(_A )
return image
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : str = UNetaDConditionModel(
act_fn="gelu",attention_head_dim=8,norm_num_groups=_A,block_out_channels=[32, 32, 64, 64],time_cond_proj_dim=160,conv_in_kernel=1,conv_out_kernel=1,cross_attention_dim=32,down_block_types=(
"KDownBlock2D",
"KCrossAttnDownBlock2D",
"KCrossAttnDownBlock2D",
"KCrossAttnDownBlock2D",
),in_channels=8,mid_block_type=_A,only_cross_attention=_A,out_channels=5,resnet_time_scale_shift="scale_shift",time_embedding_type="fourier",timestep_post_act="gelu",up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"),)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 32, 64, 64],in_channels=3,out_channels=3,down_block_types=[
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
],up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],latent_channels=4,)
SCREAMING_SNAKE_CASE_ : int = EulerDiscreteScheduler(prediction_type="sample" )
SCREAMING_SNAKE_CASE_ : List[Any] = CLIPTextConfig(
bos_token_id=0,eos_token_id=2,hidden_size=32,intermediate_size=37,layer_norm_eps=1E-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=1000,hidden_act="quick_gelu",projection_dim=512,)
SCREAMING_SNAKE_CASE_ : Tuple = CLIPTextModel(_A )
SCREAMING_SNAKE_CASE_ : int = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {
"unet": model.eval(),
"vae": vae.eval(),
"scheduler": scheduler,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def __UpperCamelCase ( self : List[Any],_A : int,_A : Tuple=0 ):
"""simple docstring"""
if str(_A ).startswith("mps" ):
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.manual_seed(_A )
else:
SCREAMING_SNAKE_CASE_ : Dict = torch.Generator(device=_A ).manual_seed(_A )
SCREAMING_SNAKE_CASE_ : Tuple = {
"prompt": "A painting of a squirrel eating a burger",
"image": self.dummy_image.cpu(),
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = "cpu"
SCREAMING_SNAKE_CASE_ : List[str] = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : List[str] = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
SCREAMING_SNAKE_CASE_ : Tuple = self.get_dummy_inputs(_A )
SCREAMING_SNAKE_CASE_ : Dict = pipe(**_A ).images
SCREAMING_SNAKE_CASE_ : Optional[int] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape,(1, 256, 256, 3) )
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.array(
[0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055] )
SCREAMING_SNAKE_CASE_ : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_A,1E-3 )
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
super().test_attention_slicing_forward_pass(expected_max_diff=7E-3 )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
super().test_cpu_offload_forward_pass(expected_max_diff=3E-3 )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def __UpperCamelCase ( self : int ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=7E-3 )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3E-3 )
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
super().test_save_load_local(expected_max_difference=3E-3 )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
super().test_save_load_optional_components(expected_max_difference=3E-3 )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = [
"DDIMScheduler",
"DDPMScheduler",
"PNDMScheduler",
"HeunDiscreteScheduler",
"EulerAncestralDiscreteScheduler",
"KDPM2DiscreteScheduler",
"KDPM2AncestralDiscreteScheduler",
"DPMSolverSDEScheduler",
]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : Any = self.pipeline_class(**_A )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
SCREAMING_SNAKE_CASE_ : Any = self.get_dummy_inputs(_A )
SCREAMING_SNAKE_CASE_ : Optional[Any] = 2
SCREAMING_SNAKE_CASE_ : Optional[int] = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
                # these schedulers are not supported by this pipeline, skip them
continue
SCREAMING_SNAKE_CASE_ : Tuple = getattr(_A,scheduler_enum.name )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = scheduler_cls.from_config(pipe.scheduler.config )
SCREAMING_SNAKE_CASE_ : Optional[int] = pipe(**_A )[0]
outputs.append(_A )
assert check_same_shape(_A )
@require_torch_gpu
@slow
class a__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = torch.manual_seed(33 )
SCREAMING_SNAKE_CASE_ : Optional[int] = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4",torch_dtype=torch.floataa )
pipe.to("cuda" )
SCREAMING_SNAKE_CASE_ : List[Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
"stabilityai/sd-x2-latent-upscaler",torch_dtype=torch.floataa )
upscaler.to("cuda" )
SCREAMING_SNAKE_CASE_ : Tuple = "a photo of an astronaut high resolution, unreal engine, ultra realistic"
SCREAMING_SNAKE_CASE_ : str = pipe(_A,generator=_A,output_type="latent" ).images
SCREAMING_SNAKE_CASE_ : Optional[Any] = upscaler(
prompt=_A,image=_A,num_inference_steps=20,guidance_scale=0,generator=_A,output_type="np",).images[0]
SCREAMING_SNAKE_CASE_ : str = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy" )
assert np.abs((expected_image - image).mean() ) < 5E-2
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = torch.manual_seed(33 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
"stabilityai/sd-x2-latent-upscaler",torch_dtype=torch.floataa )
upscaler.to("cuda" )
SCREAMING_SNAKE_CASE_ : Any = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"
SCREAMING_SNAKE_CASE_ : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png" )
SCREAMING_SNAKE_CASE_ : str = upscaler(
prompt=_A,image=_A,num_inference_steps=20,guidance_scale=0,generator=_A,output_type="np",).images[0]
SCREAMING_SNAKE_CASE_ : Any = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy" )
assert np.abs((expected_image - image).max() ) < 5E-2
| 18 | 1 |
from __future__ import annotations
from math import pi, sqrt
def _snake_case ( lowerCAmelCase : float , lowerCAmelCase : float ):
"""simple docstring"""
if inductance <= 0:
raise ValueError("Inductance cannot be 0 or negative" )
elif capacitance <= 0:
raise ValueError("Capacitance cannot be 0 or negative" )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 18 | from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
__lowerCamelCase : Optional[int] = logging.get_logger(__name__)
__lowerCamelCase : Tuple = {
'''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class a__ ( A__ ):
A = 'perceiver'
def __init__( self : List[Any],_A : Tuple=256,_A : str=1280,_A : List[Any]=768,_A : Union[str, Any]=1,_A : Union[str, Any]=26,_A : List[str]=8,_A : List[Any]=8,_A : List[Any]=None,_A : List[Any]=None,_A : Union[str, Any]="kv",_A : Any=1,_A : int=1,_A : Dict="gelu",_A : Any=0.1,_A : int=0.02,_A : int=1E-12,_A : Any=True,_A : Optional[Any]=262,_A : List[Any]=2048,_A : str=56,_A : Optional[int]=[368, 496],_A : Dict=16,_A : Tuple=1920,_A : List[Any]=16,_A : str=[1, 16, 224, 224],**_A : Optional[Any],):
"""simple docstring"""
super().__init__(**_A )
SCREAMING_SNAKE_CASE_ : Dict = num_latents
SCREAMING_SNAKE_CASE_ : List[Any] = d_latents
SCREAMING_SNAKE_CASE_ : Union[str, Any] = d_model
SCREAMING_SNAKE_CASE_ : Optional[int] = num_blocks
SCREAMING_SNAKE_CASE_ : List[Any] = num_self_attends_per_block
SCREAMING_SNAKE_CASE_ : Tuple = num_self_attention_heads
SCREAMING_SNAKE_CASE_ : List[str] = num_cross_attention_heads
SCREAMING_SNAKE_CASE_ : List[Any] = qk_channels
SCREAMING_SNAKE_CASE_ : Any = v_channels
SCREAMING_SNAKE_CASE_ : Any = cross_attention_shape_for_attention
SCREAMING_SNAKE_CASE_ : List[str] = self_attention_widening_factor
SCREAMING_SNAKE_CASE_ : Any = cross_attention_widening_factor
SCREAMING_SNAKE_CASE_ : List[Any] = hidden_act
SCREAMING_SNAKE_CASE_ : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Any = initializer_range
SCREAMING_SNAKE_CASE_ : List[Any] = layer_norm_eps
SCREAMING_SNAKE_CASE_ : Tuple = use_query_residual
# masked language modeling attributes
SCREAMING_SNAKE_CASE_ : List[str] = vocab_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = max_position_embeddings
# image classification attributes
SCREAMING_SNAKE_CASE_ : Dict = image_size
# flow attributes
SCREAMING_SNAKE_CASE_ : List[Any] = train_size
# multimodal autoencoding attributes
SCREAMING_SNAKE_CASE_ : str = num_frames
SCREAMING_SNAKE_CASE_ : Any = audio_samples_per_frame
SCREAMING_SNAKE_CASE_ : Tuple = samples_per_patch
SCREAMING_SNAKE_CASE_ : Optional[Any] = output_shape
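# Hypothetical usage sketch (the class name PerceiverConfig is assumed from
# model_type="perceiver" and is not defined in this file):
#   config = PerceiverConfig(num_latents=256, d_latents=1280)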
class a__ ( A__ ):
@property
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE_ : List[str] = {0: "batch", 1: "choice", 2: "sequence"}
else:
SCREAMING_SNAKE_CASE_ : str = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("inputs", dynamic_axis),
("attention_mask", dynamic_axis),
] )
@property
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
return 1E-4
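# The method below builds dummy inputs for ONNX export: when the preprocessor
# is a tokenizer it synthesizes token ids (renamed from "input_ids" to
# "inputs"), and when it is an image processor exposing pixel_values it
# synthesizes images of the requested size instead.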
def __UpperCamelCase ( self : List[str],_A : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],_A : int = -1,_A : int = -1,_A : int = -1,_A : bool = False,_A : Optional[TensorType] = None,_A : int = 3,_A : int = 40,_A : int = 40,):
"""simple docstring"""
if isinstance(_A,_A ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE_ : Tuple = compute_effective_axis_dimension(
_A,fixed_dimension=OnnxConfig.default_fixed_batch,num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE_ : Tuple = preprocessor.num_special_tokens_to_add(_A )
SCREAMING_SNAKE_CASE_ : Any = compute_effective_axis_dimension(
_A,fixed_dimension=OnnxConfig.default_fixed_sequence,num_token_to_add=_A )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE_ : Optional[Any] = [" ".join(["a"] ) * seq_length] * batch_size
SCREAMING_SNAKE_CASE_ : str = dict(preprocessor(_A,return_tensors=_A ) )
SCREAMING_SNAKE_CASE_ : List[str] = inputs.pop("input_ids" )
return inputs
elif isinstance(_A,_A ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE_ : Any = compute_effective_axis_dimension(_A,fixed_dimension=OnnxConfig.default_fixed_batch )
SCREAMING_SNAKE_CASE_ : Optional[int] = self._generate_dummy_images(_A,_A,_A,_A )
SCREAMING_SNAKE_CASE_ : Any = dict(preprocessor(images=_A,return_tensors=_A ) )
SCREAMING_SNAKE_CASE_ : Any = inputs.pop("pixel_values" )
return inputs
else:
raise ValueError(
"Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor." )
| 18 | 1 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _snake_case ( lowerCAmelCase : int ):
"""simple docstring"""
return getitem, k
def _snake_case ( lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[Any] ):
"""simple docstring"""
return setitem, k, v
def _snake_case ( lowerCAmelCase : List[str] ):
"""simple docstring"""
return delitem, k
def _snake_case ( lowerCAmelCase : Optional[int] , lowerCAmelCase : int , *lowerCAmelCase : Optional[int] ):
"""simple docstring"""
try:
return fun(lowerCAmelCase , *lowerCAmelCase ), None
except Exception as e:
return None, e
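# _run_operation returns a (result, exception) pair so the test below can
# compare both the value and the error behaviour of HashMap against dict.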
__lowerCamelCase : int = (
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
)
__lowerCamelCase : List[str] = [
_set('''key_a''', '''val_a'''),
_set('''key_a''', '''val_b'''),
]
__lowerCamelCase : Optional[Any] = [
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
_del('''key_a'''),
_del('''key_b'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
]
__lowerCamelCase : Optional[int] = [
_get('''key_a'''),
_del('''key_a'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
_del('''key_a'''),
_get('''key_a'''),
]
__lowerCamelCase : int = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
__lowerCamelCase : List[str] = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set('''key_a''', '''val_b'''),
]
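# Each operation list above is replayed against both HashMap and the built-in
# dict in the parametrized test below; the two must agree on results, raised
# exceptions, string form, key set, and length.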
@pytest.mark.parametrize(
"operations" , (
pytest.param(_add_items , id="add items" ),
pytest.param(_overwrite_items , id="overwrite items" ),
pytest.param(_delete_items , id="delete items" ),
pytest.param(_access_absent_items , id="access absent items" ),
pytest.param(_add_with_resize_up , id="add with resize up" ),
pytest.param(_add_with_resize_down , id="add with resize down" ),
) , )
def _snake_case ( lowerCAmelCase : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = HashMap(initial_block_size=4 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {}
for _, (fun, *args) in enumerate(lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = _run_operation(lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = _run_operation(lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase )
assert my_res == py_res
assert str(lowerCAmelCase ) == str(lowerCAmelCase )
assert set(lowerCAmelCase ) == set(lowerCAmelCase )
assert len(lowerCAmelCase ) == len(lowerCAmelCase )
assert set(my.items() ) == set(py.items() )
def _snake_case ( ):
"""simple docstring"""
def is_public(lowerCAmelCase : str ) -> bool:
return not name.startswith("_" )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {name for name in dir({} ) if is_public(lowerCAmelCase )}
SCREAMING_SNAKE_CASE_ : Dict = {name for name in dir(HashMap() ) if is_public(lowerCAmelCase )}
assert dict_public_names > hash_public_names
| 18 | from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class a__ ( yaml.SafeLoader ):
def __UpperCamelCase ( self : str,_A : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = [self.constructed_objects[key_node] for key_node, _ in node.value]
SCREAMING_SNAKE_CASE_ : List[str] = [tuple(_A ) if isinstance(_A,_A ) else key for key in keys]
SCREAMING_SNAKE_CASE_ : Optional[int] = Counter(_A )
SCREAMING_SNAKE_CASE_ : Tuple = [key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(F'Got duplicate yaml keys: {duplicate_keys}' )
def __UpperCamelCase ( self : Tuple,_A : Dict,_A : List[Any]=False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = super().construct_mapping(_A,deep=_A )
self._check_no_duplicates_on_constructed_node(_A )
return mapping
def _snake_case ( lowerCAmelCase : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = list(readme_content.splitlines() )
if full_content and full_content[0] == "---" and "---" in full_content[1:]:
SCREAMING_SNAKE_CASE_ : List[Any] = full_content[1:].index("---" ) + 1
SCREAMING_SNAKE_CASE_ : int = "\n".join(full_content[1:sep_idx] )
return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
return None, "\n".join(lowerCAmelCase )
class a__ ( A__ ):
# class attributes
A = {'train_eval_index'} # train-eval-index in the YAML metadata
@classmethod
def __UpperCamelCase ( cls : Any,_A : Path ):
"""simple docstring"""
with open(_A,encoding="utf-8" ) as readme_file:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = _split_yaml_from_readme(readme_file.read() )
if yaml_string is not None:
return cls.from_yaml_string(_A )
else:
return cls()
def __UpperCamelCase ( self : Dict,_A : Path ):
"""simple docstring"""
if path.exists():
with open(_A,encoding="utf-8" ) as readme_file:
SCREAMING_SNAKE_CASE_ : int = readme_file.read()
else:
SCREAMING_SNAKE_CASE_ : Any = None
SCREAMING_SNAKE_CASE_ : int = self._to_readme(_A )
with open(_A,"w",encoding="utf-8" ) as readme_file:
readme_file.write(_A )
def __UpperCamelCase ( self : Optional[int],_A : Optional[str] = None ):
"""simple docstring"""
if readme_content is not None:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = _split_yaml_from_readme(_A )
SCREAMING_SNAKE_CASE_ : Tuple = "---\n" + self.to_yaml_string() + "---\n" + content
else:
SCREAMING_SNAKE_CASE_ : Dict = "---\n" + self.to_yaml_string() + "---\n"
return full_content
@classmethod
def __UpperCamelCase ( cls : Dict,_A : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = yaml.load(_A,Loader=_NoDuplicateSafeLoader ) or {}
# Convert the YAML keys to DatasetMetadata fields
SCREAMING_SNAKE_CASE_ : Any = {
(key.replace("-","_" ) if key.replace("-","_" ) in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
return cls(**_A )
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
return yaml.safe_dump(
{
(key.replace("_","-" ) if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
},sort_keys=_A,allow_unicode=_A,encoding="utf-8",).decode("utf-8" )
__lowerCamelCase : List[Any] = {
'''image-classification''': [],
'''translation''': [],
'''image-segmentation''': [],
'''fill-mask''': [],
'''automatic-speech-recognition''': [],
'''token-classification''': [],
'''sentence-similarity''': [],
'''audio-classification''': [],
'''question-answering''': [],
'''summarization''': [],
'''zero-shot-classification''': [],
'''table-to-text''': [],
'''feature-extraction''': [],
'''other''': [],
'''multiple-choice''': [],
'''text-classification''': [],
'''text-to-image''': [],
'''text2text-generation''': [],
'''zero-shot-image-classification''': [],
'''tabular-classification''': [],
'''tabular-regression''': [],
'''image-to-image''': [],
'''tabular-to-text''': [],
'''unconditional-image-generation''': [],
'''text-retrieval''': [],
'''text-to-speech''': [],
'''object-detection''': [],
'''audio-to-audio''': [],
'''text-generation''': [],
'''conversational''': [],
'''table-question-answering''': [],
'''visual-question-answering''': [],
'''image-to-text''': [],
'''reinforcement-learning''': [],
'''voice-activity-detection''': [],
'''time-series-forecasting''': [],
'''document-question-answering''': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
__lowerCamelCase : List[Any] = ArgumentParser(usage='''Validate the yaml metadata block of a README.md file.''')
ap.add_argument('''readme_filepath''')
__lowerCamelCase : Dict = ap.parse_args()
__lowerCamelCase : List[Any] = Path(args.readme_filepath)
__lowerCamelCase : Optional[int] = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 18 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : List[Any] = logging.get_logger(__name__)
__lowerCamelCase : Dict = {
'''alibaba-damo/mgp-str-base''': '''https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json''',
}
class a__ ( A__ ):
A = 'mgp-str'
def __init__( self : Optional[int],_A : str=[32, 128],_A : int=4,_A : Union[str, Any]=3,_A : List[str]=27,_A : str=38,_A : Optional[Any]=5_0257,_A : List[str]=3_0522,_A : Optional[int]=768,_A : str=12,_A : Tuple=12,_A : Optional[int]=4.0,_A : Dict=True,_A : Any=False,_A : Dict=1E-5,_A : int=0.0,_A : List[Any]=0.0,_A : Optional[Any]=0.0,_A : List[Any]=False,_A : str=0.02,**_A : str,):
"""simple docstring"""
super().__init__(**_A )
SCREAMING_SNAKE_CASE_ : List[Any] = image_size
SCREAMING_SNAKE_CASE_ : int = patch_size
SCREAMING_SNAKE_CASE_ : List[str] = num_channels
SCREAMING_SNAKE_CASE_ : Union[str, Any] = max_token_length
SCREAMING_SNAKE_CASE_ : List[str] = num_character_labels
SCREAMING_SNAKE_CASE_ : Dict = num_bpe_labels
SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_wordpiece_labels
SCREAMING_SNAKE_CASE_ : List[str] = hidden_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE_ : str = mlp_ratio
SCREAMING_SNAKE_CASE_ : Union[str, Any] = distilled
SCREAMING_SNAKE_CASE_ : Dict = layer_norm_eps
SCREAMING_SNAKE_CASE_ : Dict = drop_rate
SCREAMING_SNAKE_CASE_ : str = qkv_bias
SCREAMING_SNAKE_CASE_ : Dict = attn_drop_rate
SCREAMING_SNAKE_CASE_ : Optional[int] = drop_path_rate
SCREAMING_SNAKE_CASE_ : Union[str, Any] = output_aa_attentions
SCREAMING_SNAKE_CASE_ : Any = initializer_range
| 18 | from __future__ import annotations
from math import pi, sqrt
def _snake_case ( lowerCAmelCase : float , lowerCAmelCase : float ):
"""simple docstring"""
if inductance <= 0:
raise ValueError("Inductance cannot be 0 or negative" )
elif capacitance <= 0:
raise ValueError("Capacitance cannot be 0 or negative" )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 18 | 1 |
class a__ :
def __init__( self : List[str],_A : list ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = set_counts
SCREAMING_SNAKE_CASE_ : List[Any] = max(_A )
SCREAMING_SNAKE_CASE_ : str = len(_A )
SCREAMING_SNAKE_CASE_ : List[str] = [1] * num_sets
SCREAMING_SNAKE_CASE_ : Union[str, Any] = list(range(_A ) )
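# merge() below performs union by rank: the root with the higher rank absorbs
# the other, set sizes are accumulated into the surviving root, and max_set
# tracks the largest component seen so far. get_parent() applies path
# compression while walking to the root.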
def __UpperCamelCase ( self : Union[str, Any],_A : int,_A : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_parent(_A )
SCREAMING_SNAKE_CASE_ : Dict = self.get_parent(_A )
if src_parent == dst_parent:
return False
if self.ranks[dst_parent] >= self.ranks[src_parent]:
self.set_counts[dst_parent] += self.set_counts[src_parent]
SCREAMING_SNAKE_CASE_ : List[str] = 0
SCREAMING_SNAKE_CASE_ : Union[str, Any] = dst_parent
if self.ranks[dst_parent] == self.ranks[src_parent]:
self.ranks[dst_parent] += 1
SCREAMING_SNAKE_CASE_ : str = self.set_counts[dst_parent]
else:
self.set_counts[src_parent] += self.set_counts[dst_parent]
SCREAMING_SNAKE_CASE_ : Tuple = 0
SCREAMING_SNAKE_CASE_ : str = src_parent
SCREAMING_SNAKE_CASE_ : Dict = self.set_counts[src_parent]
SCREAMING_SNAKE_CASE_ : Optional[Any] = max(self.max_set,_A )
return True
def __UpperCamelCase ( self : Any,_A : int ):
"""simple docstring"""
if self.parents[disj_set] == disj_set:
return disj_set
SCREAMING_SNAKE_CASE_ : str = self.get_parent(self.parents[disj_set] )
return self.parents[disj_set]
| 18 | def _snake_case ( lowerCAmelCase : list ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = len(lowerCAmelCase )
for i in range(1 , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : int = collection[i]
SCREAMING_SNAKE_CASE_ : Any = 0
SCREAMING_SNAKE_CASE_ : Tuple = i - 1
while low <= high:
SCREAMING_SNAKE_CASE_ : int = (low + high) // 2
if val < collection[mid]:
SCREAMING_SNAKE_CASE_ : Optional[Any] = mid - 1
else:
SCREAMING_SNAKE_CASE_ : Tuple = mid + 1
for j in range(lowerCAmelCase , lowerCAmelCase , -1 ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = collection[j - 1]
SCREAMING_SNAKE_CASE_ : int = val
return collection
if __name__ == "__main__":
__lowerCamelCase : Dict = input('''Enter numbers separated by a comma:\n''').strip()
__lowerCamelCase : List[str] = [int(item) for item in user_input.split(''',''')]
print(binary_insertion_sort(unsorted))
| 18 | 1 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=A__ )
class a__ ( A__ ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
A = field(default='question-answering-extractive' , metadata={'include_in_asdict_even_if_is_default': True} )
A = Features({'question': Value('string' ), 'context': Value('string' )} )
A = Features(
{
'answers': Sequence(
{
'text': Value('string' ),
'answer_start': Value('int32' ),
} )
} )
A = "question"
A = "context"
A = "answers"
@property
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
| 18 | from collections.abc import Sequence
from queue import Queue
class a__ :
def __init__( self : int,_A : List[Any],_A : Optional[Any],_A : Optional[int],_A : int=None,_A : List[str]=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = start
SCREAMING_SNAKE_CASE_ : List[str] = end
SCREAMING_SNAKE_CASE_ : Tuple = val
SCREAMING_SNAKE_CASE_ : List[str] = (start + end) // 2
SCREAMING_SNAKE_CASE_ : Optional[int] = left
SCREAMING_SNAKE_CASE_ : str = right
def __repr__( self : Tuple ):
"""simple docstring"""
return F'SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'
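# SegmentTree builds a balanced binary tree over `collection`: every node
# covers an inclusive range [start, end] and stores fn(left.val, right.val),
# so both point updates and range queries run in O(log n).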
class a__ :
def __init__( self : Any,_A : Sequence,_A : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = collection
SCREAMING_SNAKE_CASE_ : Optional[int] = function
if self.collection:
SCREAMING_SNAKE_CASE_ : List[str] = self._build_tree(0,len(_A ) - 1 )
def __UpperCamelCase ( self : int,_A : Any,_A : List[Any] ):
"""simple docstring"""
self._update_tree(self.root,_A,_A )
def __UpperCamelCase ( self : str,_A : Any,_A : List[Any] ):
"""simple docstring"""
return self._query_range(self.root,_A,_A )
def __UpperCamelCase ( self : Any,_A : Optional[int],_A : int ):
"""simple docstring"""
if start == end:
return SegmentTreeNode(_A,_A,self.collection[start] )
SCREAMING_SNAKE_CASE_ : List[Any] = (start + end) // 2
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self._build_tree(_A,_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = self._build_tree(mid + 1,_A )
return SegmentTreeNode(_A,_A,self.fn(left.val,right.val ),_A,_A )
def __UpperCamelCase ( self : int,_A : int,_A : Tuple,_A : Dict ):
"""simple docstring"""
if node.start == i and node.end == i:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = val
return
if i <= node.mid:
self._update_tree(node.left,_A,_A )
else:
self._update_tree(node.right,_A,_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.fn(node.left.val,node.right.val )
def __UpperCamelCase ( self : str,_A : List[str],_A : Optional[int],_A : Optional[Any] ):
"""simple docstring"""
if node.start == i and node.end == j:
return node.val
if i <= node.mid:
if j <= node.mid:
# range in left child tree
return self._query_range(node.left,_A,_A )
else:
# range in left child tree and right child tree
return self.fn(
self._query_range(node.left,_A,node.mid ),self._query_range(node.right,node.mid + 1,_A ),)
else:
# range in right child tree
return self._query_range(node.right,_A,_A )
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
if self.root is not None:
SCREAMING_SNAKE_CASE_ : int = Queue()
queue.put(self.root )
while not queue.empty():
SCREAMING_SNAKE_CASE_ : Tuple = queue.get()
yield node
if node.left is not None:
queue.put(node.left )
if node.right is not None:
queue.put(node.right )
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('''*''' * 50)
__lowerCamelCase : int = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
| 18 | 1 |
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCamelCase : Optional[Any] = get_tests_dir('''fixtures/spiece.model''')
@require_sentencepiece
@require_tokenizers
class a__ ( A__ , unittest.TestCase ):
A = AlbertTokenizer
A = AlbertTokenizerFast
A = True
A = True
A = True
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE_ : Optional[int] = AlbertTokenizer(_A )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCamelCase ( self : Union[str, Any],_A : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = "this is a test"
SCREAMING_SNAKE_CASE_ : Optional[Any] = "this is a test"
return input_text, output_text
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = "<pad>"
SCREAMING_SNAKE_CASE_ : str = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ),_A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ),_A )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0],"<pad>" )
self.assertEqual(vocab_keys[1],"<unk>" )
self.assertEqual(vocab_keys[-1],"▁eloquent" )
self.assertEqual(len(_A ),3_0000 )
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size,3_0000 )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE_ : List[str] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Tuple = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_ : List[str] = "I was born in 92000, and this is falsé."
SCREAMING_SNAKE_CASE_ : str = tokenizer.tokenize(_A )
SCREAMING_SNAKE_CASE_ : List[Any] = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A,_A )
SCREAMING_SNAKE_CASE_ : Any = tokenizer.encode(_A,add_special_tokens=_A )
SCREAMING_SNAKE_CASE_ : List[str] = rust_tokenizer.encode(_A,add_special_tokens=_A )
self.assertListEqual(_A,_A )
SCREAMING_SNAKE_CASE_ : List[str] = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_ : Tuple = tokenizer.encode(_A )
SCREAMING_SNAKE_CASE_ : Any = rust_tokenizer.encode(_A )
self.assertListEqual(_A,_A )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = AlbertTokenizer(_A,keep_accents=_A )
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer.tokenize("This is a test" )
self.assertListEqual(_A,["▁this", "▁is", "▁a", "▁test"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ),[48, 25, 21, 1289] )
SCREAMING_SNAKE_CASE_ : int = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
_A,["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."] )
SCREAMING_SNAKE_CASE_ : int = tokenizer.convert_tokens_to_ids(_A )
self.assertListEqual(_A,[31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] )
SCREAMING_SNAKE_CASE_ : List[str] = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(
_A,["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."],)
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = AlbertTokenizer(_A )
SCREAMING_SNAKE_CASE_ : List[str] = tokenizer.encode("sequence builders" )
SCREAMING_SNAKE_CASE_ : Dict = tokenizer.encode("multi-sequence build" )
SCREAMING_SNAKE_CASE_ : Tuple = tokenizer.build_inputs_with_special_tokens(_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(_A,_A )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = {"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "input_ids": [[2, 2_1970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 1_2051, 18, 17, 7103, 2153, 673, 8, 3515, 1_8684, 8, 4461, 6, 1927, 297, 8, 1_2060, 2607, 18, 13, 5, 4461, 15, 1_0538, 38, 8, 135, 15, 822, 58, 15, 993, 1_0363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 1_0641, 6, 29, 84, 2512, 2430, 782, 1_8684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 1_1712, 15, 7103, 2153, 673, 17, 2_4883, 9990, 9, 3], [2, 1_1502, 25, 1006, 20, 782, 8, 1_1809, 855, 1732, 1_9393, 1_8667, 37, 367, 2_1018, 69, 1854, 34, 1_1860, 1_9124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 1_7659, 84, 14, 1_6792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_A,model_name="albert-base-v2",revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e",)
| 18 | def _snake_case ( lowerCAmelCase : int , lowerCAmelCase : int ):
"""simple docstring"""
while b:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = b, a % b
return a
def _snake_case ( lowerCAmelCase : int , lowerCAmelCase : int ):
"""simple docstring"""
return a if b == 0 else euclidean_gcd_recursive(lowerCAmelCase , a % b )
def _snake_case ( ):
"""simple docstring"""
print(f'euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}' )
print(f'euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}' )
print(f'euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}' )
print(f'euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}' )
print(f'euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}' )
print(f'euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}' )
print(f'euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}' )
print(f'euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}' )
print(f'euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}' )
print(f'euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}' )
if __name__ == "__main__":
main()
| 18 | 1 |
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
__lowerCamelCase : Dict = logging.get_logger(__name__)
enable_full_determinism()
class a__ ( A__ , A__ , unittest.TestCase ):
A = UNetaDModel
A = 'sample'
@property
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = 4
SCREAMING_SNAKE_CASE_ : Tuple = 3
SCREAMING_SNAKE_CASE_ : str = (32, 32)
SCREAMING_SNAKE_CASE_ : Tuple = floats_tensor((batch_size, num_channels) + sizes ).to(_A )
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.tensor([10] ).to(_A )
return {"sample": noise, "timestep": time_step}
@property
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
return (3, 32, 32)
@property
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
return (3, 32, 32)
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = {
"block_out_channels": (32, 64),
"down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
"up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
"attention_head_dim": 3,
"out_channels": 3,
"in_channels": 3,
"layers_per_block": 2,
"sample_size": 32,
}
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
class a__ ( A__ , A__ , unittest.TestCase ):
A = UNetaDModel
A = 'sample'
@property
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = 4
SCREAMING_SNAKE_CASE_ : str = 4
SCREAMING_SNAKE_CASE_ : Any = (32, 32)
SCREAMING_SNAKE_CASE_ : List[str] = floats_tensor((batch_size, num_channels) + sizes ).to(_A )
SCREAMING_SNAKE_CASE_ : Any = torch.tensor([10] ).to(_A )
return {"sample": noise, "timestep": time_step}
@property
def __UpperCamelCase ( self : str ):
"""simple docstring"""
return (4, 32, 32)
@property
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
return (4, 32, 32)
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {
"sample_size": 32,
"in_channels": 4,
"out_channels": 4,
"layers_per_block": 2,
"block_out_channels": (32, 64),
"attention_head_dim": 32,
"down_block_types": ("DownBlock2D", "DownBlock2D"),
"up_block_types": ("UpBlock2D", "UpBlock2D"),
}
SCREAMING_SNAKE_CASE_ : List[Any] = self.dummy_input
return init_dict, inputs_dict
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update",output_loading_info=_A )
self.assertIsNotNone(_A )
self.assertEqual(len(loading_info["missing_keys"] ),0 )
model.to(_A )
SCREAMING_SNAKE_CASE_ : str = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != "cuda","This test is supposed to run on GPU" )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update",output_loading_info=_A )
model.to(_A )
SCREAMING_SNAKE_CASE_ : List[Any] = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != "cuda","This test is supposed to run on GPU" )
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update",output_loading_info=_A )
model_accelerate.to(_A )
model_accelerate.eval()
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.randn(
1,model_accelerate.config.in_channels,model_accelerate.config.sample_size,model_accelerate.config.sample_size,generator=torch.manual_seed(0 ),)
SCREAMING_SNAKE_CASE_ : Optional[Any] = noise.to(_A )
SCREAMING_SNAKE_CASE_ : int = torch.tensor([10] * noise.shape[0] ).to(_A )
SCREAMING_SNAKE_CASE_ : int = model_accelerate(_A,_A )["sample"]
# the two models don't need to be on the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = UNetaDModel.from_pretrained(
"fusing/unet-ldm-dummy-update",output_loading_info=_A,low_cpu_mem_usage=_A )
model_normal_load.to(_A )
model_normal_load.eval()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model_normal_load(_A,_A )["sample"]
assert torch_all_close(_A,_A,rtol=1E-3 )
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update" )
model.eval()
model.to(_A )
SCREAMING_SNAKE_CASE_ : List[str] = torch.randn(
1,model.config.in_channels,model.config.sample_size,model.config.sample_size,generator=torch.manual_seed(0 ),)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = noise.to(_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.tensor([10] * noise.shape[0] ).to(_A )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : Dict = model(_A,_A ).sample
SCREAMING_SNAKE_CASE_ : Optional[int] = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800] )
# fmt: on
self.assertTrue(torch_all_close(_A,_A,rtol=1E-3 ) )
class a__ ( A__ , A__ , unittest.TestCase ):
A = UNetaDModel
A = 'sample'
@property
def __UpperCamelCase ( self : Tuple,_A : str=(32, 32) ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 4
SCREAMING_SNAKE_CASE_ : str = 3
SCREAMING_SNAKE_CASE_ : str = floats_tensor((batch_size, num_channels) + sizes ).to(_A )
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa,device=_A )
return {"sample": noise, "timestep": time_step}
@property
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
return (3, 32, 32)
@property
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
return (3, 32, 32)
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = {
"block_out_channels": [32, 64, 64, 64],
"in_channels": 3,
"layers_per_block": 1,
"out_channels": 3,
"time_embedding_type": "fourier",
"norm_eps": 1E-6,
"mid_block_scale_factor": math.sqrt(2.0 ),
"norm_num_groups": None,
"down_block_types": [
"SkipDownBlock2D",
"AttnSkipDownBlock2D",
"SkipDownBlock2D",
"SkipDownBlock2D",
],
"up_block_types": [
"SkipUpBlock2D",
"SkipUpBlock2D",
"AttnSkipUpBlock2D",
"SkipUpBlock2D",
],
}
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
@slow
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = UNetaDModel.from_pretrained("google/ncsnpp-celebahq-256",output_loading_info=_A )
self.assertIsNotNone(_A )
self.assertEqual(len(loading_info["missing_keys"] ),0 )
model.to(_A )
SCREAMING_SNAKE_CASE_ : List[Any] = self.dummy_input
SCREAMING_SNAKE_CASE_ : List[str] = floats_tensor((4, 3) + (256, 256) ).to(_A )
SCREAMING_SNAKE_CASE_ : Any = noise
SCREAMING_SNAKE_CASE_ : List[Any] = model(**_A )
assert image is not None, "Make sure output is not None"
@slow
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = UNetaDModel.from_pretrained("google/ncsnpp-celebahq-256" )
model.to(_A )
SCREAMING_SNAKE_CASE_ : str = 4
SCREAMING_SNAKE_CASE_ : Dict = 3
SCREAMING_SNAKE_CASE_ : Tuple = (256, 256)
SCREAMING_SNAKE_CASE_ : str = torch.ones((batch_size, num_channels) + sizes ).to(_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.tensor(batch_size * [1E-4] ).to(_A )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : List[Any] = model(_A,_A ).sample
SCREAMING_SNAKE_CASE_ : int = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
SCREAMING_SNAKE_CASE_ : int = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608] )
# fmt: on
self.assertTrue(torch_all_close(_A,_A,rtol=1E-2 ) )
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = UNetaDModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update" )
model.to(_A )
SCREAMING_SNAKE_CASE_ : str = 4
SCREAMING_SNAKE_CASE_ : Optional[int] = 3
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (32, 32)
SCREAMING_SNAKE_CASE_ : int = torch.ones((batch_size, num_channels) + sizes ).to(_A )
SCREAMING_SNAKE_CASE_ : str = torch.tensor(batch_size * [1E-4] ).to(_A )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : Any = model(_A,_A ).sample
SCREAMING_SNAKE_CASE_ : List[str] = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256] )
# fmt: on
self.assertTrue(torch_all_close(_A,_A,rtol=1E-2 ) )
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
pass
| 18 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__lowerCamelCase : Dict = {'''configuration_vit_mae''': ['''VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMAEConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Any = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Optional[Any] = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
__lowerCamelCase : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 18 | 1 |
import math
def _snake_case ( lowerCAmelCase : float , lowerCAmelCase : float ):
"""simple docstring"""
if initial_intensity < 0:
raise ValueError("The value of intensity cannot be negative" )
# handling of negative values of initial intensity
if angle < 0 or angle > 3_6_0:
raise ValueError("In Malus Law, the angle is in the range 0-360 degrees" )
# handling of values out of allowed range
return initial_intensity * (math.cos(math.radians(lowerCAmelCase ) ) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name='''malus_law''')
| 18 | import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def _snake_case ( lowerCAmelCase : int , lowerCAmelCase : str , lowerCAmelCase : str , lowerCAmelCase : Path , lowerCAmelCase : str = None , lowerCAmelCase : str = None , lowerCAmelCase : str = None , ):
"""simple docstring"""
if config_name_or_path is None:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"
if generator_tokenizer_name_or_path is None:
SCREAMING_SNAKE_CASE_ : Dict = generator_name_or_path
if question_encoder_tokenizer_name_or_path is None:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = question_encoder_name_or_path
SCREAMING_SNAKE_CASE_ : Union[str, Any] = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration
# Save model.
SCREAMING_SNAKE_CASE_ : List[Any] = RagConfig.from_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Tuple = AutoConfig.from_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : int = AutoConfig.from_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = gen_config
SCREAMING_SNAKE_CASE_ : Optional[Any] = question_encoder_config
SCREAMING_SNAKE_CASE_ : Dict = model_class.from_pretrained_question_encoder_generator(
lowerCAmelCase , lowerCAmelCase , config=lowerCAmelCase )
rag_model.save_pretrained(lowerCAmelCase )
# Sanity check.
model_class.from_pretrained(lowerCAmelCase )
# Save tokenizers.
SCREAMING_SNAKE_CASE_ : Optional[Any] = AutoTokenizer.from_pretrained(lowerCAmelCase )
gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/" )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AutoTokenizer.from_pretrained(lowerCAmelCase )
question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/" )
if __name__ == "__main__":
__lowerCamelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''',
choices=['''rag_sequence''', '''rag_token'''],
required=True,
type=str,
help='''RAG model type: rag_sequence, rag_token''',
)
parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''')
parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''')
parser.add_argument(
'''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier'''
)
parser.add_argument(
'''--generator_tokenizer_name_or_path''',
type=str,
help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''',
)
parser.add_argument(
'''--question_encoder_tokenizer_name_or_path''',
type=str,
help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''',
)
parser.add_argument(
'''--config_name_or_path''',
type=str,
help=(
'''Identifier of the model config to use, if not provided, resolves to a base config for a given'''
''' ``model_type``'''
),
)
__lowerCamelCase : str = parser.parse_args()
__lowerCamelCase : int = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
| 18 | 1 |
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase : Optional[int] = logging.get_logger(__name__)
def _snake_case ( lowerCAmelCase : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.load(lowerCAmelCase , map_location="cpu" )
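# some fairseq checkpoints nest the weights under a top-level "model" key;
# reload and unwrap in that case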
if "model" in sd.keys():
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.load(lowerCAmelCase , map_location="cpu" )["model"]
# pop unnecessary weights
SCREAMING_SNAKE_CASE_ : int = [
"decoder.version",
"decoder.output_projection.weight",
]
for key in keys_to_delete:
if key in sd:
sd.pop(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = {
"decoder.project_in_dim.weight": "decoder.project_in.weight",
"decoder.project_out_dim.weight": "decoder.project_out.weight",
"decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
"decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
SCREAMING_SNAKE_CASE_ : Optional[int] = sd.pop(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Dict = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = sd[key]
# We split QKV in separate Q,K,V
SCREAMING_SNAKE_CASE_ : Optional[int] = key.replace(".qkv_proj." , ".q_proj." )
SCREAMING_SNAKE_CASE_ : Dict = key.replace(".qkv_proj." , ".k_proj." )
SCREAMING_SNAKE_CASE_ : Optional[Any] = key.replace(".qkv_proj." , ".v_proj." )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = value.shape[0]
assert depth % 3 == 0
# the QKV weight in `SequenceParallelTransformerBlock` is stored in K, V, Q order despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = torch.split(lowerCAmelCase , depth // 3 , dim=0 )
SCREAMING_SNAKE_CASE_ : int = q
SCREAMING_SNAKE_CASE_ : List[Any] = k
SCREAMING_SNAKE_CASE_ : Optional[int] = v
del sd[key]
return sd
@torch.no_grad()
def _snake_case ( lowerCAmelCase : List[str] , lowerCAmelCase : List[str] , lowerCAmelCase : Optional[int]=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = load_checkpoint(lowerCAmelCase )
if config is not None:
SCREAMING_SNAKE_CASE_ : List[str] = OPTConfig.from_pretrained(lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE_ : Optional[int] = OPTConfig()
SCREAMING_SNAKE_CASE_ : Dict = OPTModel(lowerCAmelCase ).half().eval()
model.load_state_dict(lowerCAmelCase )
# Check results
Path(lowerCAmelCase ).mkdir(exist_ok=lowerCAmelCase )
model.save_pretrained(lowerCAmelCase )
if __name__ == "__main__":
__lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fairseq_path''',
type=str,
help=(
'''path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'''
''' https://huggingface.co/models?other=opt_metasq'''
),
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--hf_config''', default=None, type=str, help='''Define HF config.''')
__lowerCamelCase : Optional[int] = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 18 | import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class a__ ( unittest.TestCase ):
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = {
"task_specific_params": {
"summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
"summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
"summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
}
}
SCREAMING_SNAKE_CASE_ : Any = {
"task_specific_params.summarization.length_penalty": 1.0,
"task_specific_params.summarization.max_length": 128,
"task_specific_params.summarization.min_length": 12,
"task_specific_params.summarization.num_beams": 4,
"task_specific_params.summarization_cnn.length_penalty": 2.0,
"task_specific_params.summarization_cnn.max_length": 142,
"task_specific_params.summarization_cnn.min_length": 56,
"task_specific_params.summarization_cnn.num_beams": 4,
"task_specific_params.summarization_xsum.length_penalty": 1.0,
"task_specific_params.summarization_xsum.max_length": 62,
"task_specific_params.summarization_xsum.min_length": 11,
"task_specific_params.summarization_xsum.num_beams": 6,
}
self.assertEqual(flatten_dict(_A ),_A )
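# flatten_dict collapses nested mappings into dot-joined keys, e.g.
# {"a": {"b": 1}} -> {"a.b": 1}, which is exactly what the assertion above
# verifies for the task_specific_params example.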
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = np.random.randn(3,4 )
self.assertTrue(np.allclose(transpose(_A ),x.transpose() ) )
SCREAMING_SNAKE_CASE_ : Tuple = np.random.randn(3,4,5 )
self.assertTrue(np.allclose(transpose(_A,axes=(1, 2, 0) ),x.transpose((1, 2, 0) ) ) )
@require_torch
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : Any = torch.tensor(_A )
self.assertTrue(np.allclose(transpose(_A ),transpose(_A ).numpy() ) )
SCREAMING_SNAKE_CASE_ : Dict = np.random.randn(3,4,5 )
SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor(_A )
self.assertTrue(np.allclose(transpose(_A,axes=(1, 2, 0) ),transpose(_A,axes=(1, 2, 0) ).numpy() ) )
@require_tf
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = tf.constant(_A )
self.assertTrue(np.allclose(transpose(_A ),transpose(_A ).numpy() ) )
SCREAMING_SNAKE_CASE_ : List[str] = np.random.randn(3,4,5 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.constant(_A )
self.assertTrue(np.allclose(transpose(_A,axes=(1, 2, 0) ),transpose(_A,axes=(1, 2, 0) ).numpy() ) )
@require_flax
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : Tuple = jnp.array(_A )
self.assertTrue(np.allclose(transpose(_A ),np.asarray(transpose(_A ) ) ) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.random.randn(3,4,5 )
SCREAMING_SNAKE_CASE_ : List[Any] = jnp.array(_A )
self.assertTrue(np.allclose(transpose(_A,axes=(1, 2, 0) ),np.asarray(transpose(_A,axes=(1, 2, 0) ) ) ) )
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = np.random.randn(3,4 )
self.assertTrue(np.allclose(reshape(_A,(4, 3) ),np.reshape(_A,(4, 3) ) ) )
SCREAMING_SNAKE_CASE_ : Any = np.random.randn(3,4,5 )
self.assertTrue(np.allclose(reshape(_A,(12, 5) ),np.reshape(_A,(12, 5) ) ) )
@require_torch
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor(_A )
self.assertTrue(np.allclose(reshape(_A,(4, 3) ),reshape(_A,(4, 3) ).numpy() ) )
SCREAMING_SNAKE_CASE_ : Dict = np.random.randn(3,4,5 )
SCREAMING_SNAKE_CASE_ : int = torch.tensor(_A )
self.assertTrue(np.allclose(reshape(_A,(12, 5) ),reshape(_A,(12, 5) ).numpy() ) )
@require_tf
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = tf.constant(_A )
self.assertTrue(np.allclose(reshape(_A,(4, 3) ),reshape(_A,(4, 3) ).numpy() ) )
SCREAMING_SNAKE_CASE_ : int = np.random.randn(3,4,5 )
SCREAMING_SNAKE_CASE_ : Any = tf.constant(_A )
self.assertTrue(np.allclose(reshape(_A,(12, 5) ),reshape(_A,(12, 5) ).numpy() ) )
@require_flax
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : int = jnp.array(_A )
self.assertTrue(np.allclose(reshape(_A,(4, 3) ),np.asarray(reshape(_A,(4, 3) ) ) ) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.random.randn(3,4,5 )
SCREAMING_SNAKE_CASE_ : Tuple = jnp.array(_A )
self.assertTrue(np.allclose(reshape(_A,(12, 5) ),np.asarray(reshape(_A,(12, 5) ) ) ) )
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = np.random.randn(1,3,4 )
self.assertTrue(np.allclose(squeeze(_A ),np.squeeze(_A ) ) )
SCREAMING_SNAKE_CASE_ : Any = np.random.randn(1,4,1,5 )
self.assertTrue(np.allclose(squeeze(_A,axis=2 ),np.squeeze(_A,axis=2 ) ) )
@require_torch
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = np.random.randn(1,3,4 )
SCREAMING_SNAKE_CASE_ : Any = torch.tensor(_A )
self.assertTrue(np.allclose(squeeze(_A ),squeeze(_A ).numpy() ) )
SCREAMING_SNAKE_CASE_ : Any = np.random.randn(1,4,1,5 )
SCREAMING_SNAKE_CASE_ : Dict = torch.tensor(_A )
self.assertTrue(np.allclose(squeeze(_A,axis=2 ),squeeze(_A,axis=2 ).numpy() ) )
@require_tf
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = np.random.randn(1,3,4 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.constant(_A )
self.assertTrue(np.allclose(squeeze(_A ),squeeze(_A ).numpy() ) )
SCREAMING_SNAKE_CASE_ : Any = np.random.randn(1,4,1,5 )
SCREAMING_SNAKE_CASE_ : Optional[int] = tf.constant(_A )
self.assertTrue(np.allclose(squeeze(_A,axis=2 ),squeeze(_A,axis=2 ).numpy() ) )
@require_flax
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = np.random.randn(1,3,4 )
SCREAMING_SNAKE_CASE_ : List[str] = jnp.array(_A )
self.assertTrue(np.allclose(squeeze(_A ),np.asarray(squeeze(_A ) ) ) )
SCREAMING_SNAKE_CASE_ : str = np.random.randn(1,4,1,5 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = jnp.array(_A )
self.assertTrue(np.allclose(squeeze(_A,axis=2 ),np.asarray(squeeze(_A,axis=2 ) ) ) )
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = np.random.randn(3,4 )
self.assertTrue(np.allclose(expand_dims(_A,axis=1 ),np.expand_dims(_A,axis=1 ) ) )
@require_torch
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor(_A )
self.assertTrue(np.allclose(expand_dims(_A,axis=1 ),expand_dims(_A,axis=1 ).numpy() ) )
@require_tf
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : Optional[int] = tf.constant(_A )
self.assertTrue(np.allclose(expand_dims(_A,axis=1 ),expand_dims(_A,axis=1 ).numpy() ) )
@require_flax
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = jnp.array(_A )
self.assertTrue(np.allclose(expand_dims(_A,axis=1 ),np.asarray(expand_dims(_A,axis=1 ) ) ) )
| 18 | 1 |
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class a__ :
def __init__( self : Any,_A : str,_A : Union[str, Any]=13,_A : Dict=30,_A : Optional[Any]=2,_A : List[Any]=3,_A : Optional[int]=True,_A : Tuple=True,_A : List[Any]=32,_A : Tuple=5,_A : List[Any]=4,_A : Optional[Any]=37,_A : Optional[int]="gelu",_A : Tuple=0.1,_A : Optional[int]=0.1,_A : List[Any]=10,_A : Optional[int]=0.02,_A : Tuple=3,_A : Optional[int]=0.6,_A : Any=None,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = parent
SCREAMING_SNAKE_CASE_ : str = batch_size
SCREAMING_SNAKE_CASE_ : str = image_size
SCREAMING_SNAKE_CASE_ : Dict = patch_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_channels
SCREAMING_SNAKE_CASE_ : int = is_training
SCREAMING_SNAKE_CASE_ : Optional[int] = use_labels
SCREAMING_SNAKE_CASE_ : List[Any] = hidden_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE_ : List[Any] = num_attention_heads
SCREAMING_SNAKE_CASE_ : List[str] = intermediate_size
SCREAMING_SNAKE_CASE_ : List[str] = hidden_act
SCREAMING_SNAKE_CASE_ : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : str = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Optional[int] = type_sequence_label_size
SCREAMING_SNAKE_CASE_ : Dict = initializer_range
SCREAMING_SNAKE_CASE_ : Optional[Any] = mask_ratio
SCREAMING_SNAKE_CASE_ : str = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
SCREAMING_SNAKE_CASE_ : List[str] = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE_ : Any = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
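# Worked example with the defaults above: image_size=30, patch_size=2 gives
# num_patches = (30 // 2) ** 2 = 225; with mask_ratio=0.6 the visible sequence
# length is ceil(0.4 * (225 + 1)) = ceil(90.4) = 91 tokens.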
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_ : Tuple = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor([self.batch_size],self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : List[Any] = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self : int ):
"""simple docstring"""
return ViTMAEConfig(
image_size=self.image_size,patch_size=self.patch_size,num_channels=self.num_channels,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,is_decoder=_A,initializer_range=self.initializer_range,mask_ratio=self.mask_ratio,)
def __UpperCamelCase ( self : Any,_A : Any,_A : List[Any],_A : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = ViTMAEModel(config=_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self : Optional[int],_A : Any,_A : str,_A : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = ViTMAEForPreTraining(_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : List[str] = model(_A )
SCREAMING_SNAKE_CASE_ : Dict = (self.image_size // self.patch_size) ** 2
SCREAMING_SNAKE_CASE_ : Optional[int] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape,(self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
SCREAMING_SNAKE_CASE_ : int = 1
SCREAMING_SNAKE_CASE_ : Optional[Any] = ViTMAEForPreTraining(_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(_A )
SCREAMING_SNAKE_CASE_ : List[Any] = self.patch_size**2
self.parent.assertEqual(result.logits.shape,(self.batch_size, num_patches, expected_num_channels) )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = config_and_inputs
SCREAMING_SNAKE_CASE_ : int = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class a__ ( A__ , A__ , unittest.TestCase ):
A = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
A = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
A = False
A = False
A = False
A = False
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = ViTMAEModelTester(self )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ConfigTester(self,config_class=_A,has_text_modality=_A,hidden_size=37 )
def __UpperCamelCase ( self : str ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
pass
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : Tuple = model_class(_A )
self.assertIsInstance(model.get_input_embeddings(),(nn.Module) )
SCREAMING_SNAKE_CASE_ : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_A,nn.Linear ) )
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : Optional[int] = model_class(_A )
SCREAMING_SNAKE_CASE_ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_ : Tuple = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_ : int = ["pixel_values"]
self.assertListEqual(arg_names[:1],_A )
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_A )
def __UpperCamelCase ( self : List[str],_A : List[str],_A : Optional[Any],_A : Union[str, Any] ):
"""simple docstring"""
np.random.seed(2 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
SCREAMING_SNAKE_CASE_ : Any = torch.from_numpy(_A )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
SCREAMING_SNAKE_CASE_ : Dict = pt_noise
super().check_pt_tf_models(_A,_A,_A )
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : Dict = model_class(_A )
model.to(_A )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : Any = model(**self._prepare_for_class(_A,_A ) )
SCREAMING_SNAKE_CASE_ : Tuple = outputs[0].cpu().numpy()
SCREAMING_SNAKE_CASE_ : Tuple = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model_class.from_pretrained(_A )
model.to(_A )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : List[Any] = model(**self._prepare_for_class(_A,_A ) )
# Make sure we don't have nans
SCREAMING_SNAKE_CASE_ : Tuple = after_outputs[0].cpu().numpy()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0
SCREAMING_SNAKE_CASE_ : Any = np.amax(np.abs(out_a - out_b ) )  # max abs difference between the pre-save and post-reload outputs
self.assertLessEqual(_A,1E-5 )
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
pass
@slow
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ : Dict = ViTMAEModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def _snake_case ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class a__ ( unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self : int ):
"""simple docstring"""
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
np.random.seed(2 )
SCREAMING_SNAKE_CASE_ : Dict = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" ).to(_A )
SCREAMING_SNAKE_CASE_ : int = self.default_image_processor
SCREAMING_SNAKE_CASE_ : Any = prepare_img()
SCREAMING_SNAKE_CASE_ : List[str] = image_processor(images=_A,return_tensors="pt" ).to(_A )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
SCREAMING_SNAKE_CASE_ : List[Any] = ViTMAEConfig()
SCREAMING_SNAKE_CASE_ : List[Any] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
SCREAMING_SNAKE_CASE_ : Any = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(**_A,noise=torch.from_numpy(_A ).to(device=_A ) )
# verify the logits
SCREAMING_SNAKE_CASE_ : int = torch.Size((1, 196, 768) )
self.assertEqual(outputs.logits.shape,_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3],expected_slice.to(_A ),atol=1E-4 ) )
| 18 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__lowerCamelCase : List[str] = logging.get_logger(__name__)
__lowerCamelCase : Tuple = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
__lowerCamelCase : List[Any] = {
'''vocab_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
},
}
__lowerCamelCase : int = {
'''allenai/longformer-base-4096''': 40_96,
'''allenai/longformer-large-4096''': 40_96,
'''allenai/longformer-large-4096-finetuned-triviaqa''': 40_96,
'''allenai/longformer-base-4096-extra.pos.embd.only''': 40_96,
'''allenai/longformer-large-4096-extra.pos.embd.only''': 40_96,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def _snake_case ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
SCREAMING_SNAKE_CASE_ : str = bs[:]
SCREAMING_SNAKE_CASE_ : Optional[int] = 0
for b in range(2**8 ):
if b not in bs:
bs.append(b )
cs.append(2**8 + n )
n += 1
SCREAMING_SNAKE_CASE_ : List[str] = [chr(n ) for n in cs]
return dict(zip(bs , cs ) )
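# Worked example of the mapping built above: printable bytes map to themselves
# (e.g. byte 33 -> "!"), while excluded bytes are shifted past 255 in order of
# appearance -- bytes 0..31 take n = 0..31, so the space byte 32 gets n = 32 and
# maps to chr(256 + 32) = "Ġ", the familiar GPT-2/RoBERTa space marker.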
def _snake_case ( lowerCAmelCase : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = set()
SCREAMING_SNAKE_CASE_ : Tuple = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
SCREAMING_SNAKE_CASE_ : List[str] = char
return pairs
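# Example: for the symbol tuple ("l", "o", "w") the function above returns the
# adjacent-pair set {("l", "o"), ("o", "w")}, which the BPE loop below repeatedly
# queries to find the highest-ranked merge.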
class a__ ( A__ ):
A = VOCAB_FILES_NAMES
A = PRETRAINED_VOCAB_FILES_MAP
A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A = ['input_ids', 'attention_mask']
def __init__( self : Union[str, Any],_A : List[Any],_A : Tuple,_A : str="replace",_A : Optional[int]="<s>",_A : Dict="</s>",_A : Any="</s>",_A : Optional[Any]="<s>",_A : Union[str, Any]="<unk>",_A : int="<pad>",_A : Dict="<mask>",_A : int=False,**_A : Dict,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else bos_token
SCREAMING_SNAKE_CASE_ : Optional[int] = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else eos_token
SCREAMING_SNAKE_CASE_ : str = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else sep_token
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else cls_token
SCREAMING_SNAKE_CASE_ : List[str] = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else unk_token
SCREAMING_SNAKE_CASE_ : Optional[Any] = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE_ : Dict = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else mask_token
super().__init__(
errors=_A,bos_token=_A,eos_token=_A,unk_token=_A,sep_token=_A,cls_token=_A,pad_token=_A,mask_token=_A,add_prefix_space=_A,**_A,)
with open(_A,encoding="utf-8" ) as vocab_handle:
SCREAMING_SNAKE_CASE_ : Tuple = json.load(_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE_ : Any = errors # how to handle errors in decoding
SCREAMING_SNAKE_CASE_ : Optional[Any] = bytes_to_unicode()
SCREAMING_SNAKE_CASE_ : str = {v: k for k, v in self.byte_encoder.items()}
with open(_A,encoding="utf-8" ) as merges_handle:
SCREAMING_SNAKE_CASE_ : int = merges_handle.read().split("\n" )[1:-1]
SCREAMING_SNAKE_CASE_ : List[str] = [tuple(merge.split() ) for merge in bpe_merges]
SCREAMING_SNAKE_CASE_ : Optional[int] = dict(zip(_A,range(len(_A ) ) ) )
SCREAMING_SNAKE_CASE_ : Any = {}
SCREAMING_SNAKE_CASE_ : List[str] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
SCREAMING_SNAKE_CASE_ : List[Any] = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
return len(self.encoder )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
return dict(self.encoder,**self.added_tokens_encoder )
def __UpperCamelCase ( self : Any,_A : int ):
"""simple docstring"""
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tuple(_A )
SCREAMING_SNAKE_CASE_ : str = get_pairs(_A )
if not pairs:
return token
while True:
SCREAMING_SNAKE_CASE_ : Tuple = min(_A,key=lambda _A : self.bpe_ranks.get(_A,float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = bigram
SCREAMING_SNAKE_CASE_ : int = []
SCREAMING_SNAKE_CASE_ : Dict = 0
while i < len(_A ):
try:
SCREAMING_SNAKE_CASE_ : Tuple = word.index(_A,_A )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
SCREAMING_SNAKE_CASE_ : str = j
if word[i] == first and i < len(_A ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
SCREAMING_SNAKE_CASE_ : Dict = tuple(_A )
SCREAMING_SNAKE_CASE_ : List[str] = new_word
if len(_A ) == 1:
break
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = get_pairs(_A )
SCREAMING_SNAKE_CASE_ : List[str] = " ".join(_A )
SCREAMING_SNAKE_CASE_ : Any = word
return word
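# Example BPE trace (hypothetical two-entry merge table): with
# bpe_ranks = {("l", "o"): 0, ("lo", "w"): 1}, the loop above rewrites
# ("l", "o", "w") -> ("lo", "w") -> ("low",) and returns "low"; a token whose
# pairs never appear in bpe_ranks comes back as space-separated characters.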
def __UpperCamelCase ( self : Dict,_A : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = []
for token in re.findall(self.pat,_A ):
SCREAMING_SNAKE_CASE_ : Any = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_A ).split(" " ) )
return bpe_tokens
def __UpperCamelCase ( self : Optional[int],_A : str ):
"""simple docstring"""
return self.encoder.get(_A,self.encoder.get(self.unk_token ) )
def __UpperCamelCase ( self : Tuple,_A : str ):
"""simple docstring"""
return self.decoder.get(_A )
def __UpperCamelCase ( self : List[str],_A : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = "".join(_A )
SCREAMING_SNAKE_CASE_ : Tuple = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8",errors=self.errors )
return text
def __UpperCamelCase ( self : List[Any],_A : str,_A : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(_A ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
SCREAMING_SNAKE_CASE_ : Tuple = os.path.join(
_A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
SCREAMING_SNAKE_CASE_ : Any = os.path.join(
_A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(_A,"w",encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder,indent=2,sort_keys=_A,ensure_ascii=_A ) + "\n" )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0
with open(_A,"w",encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items(),key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
" Please check that the tokenizer is not corrupted!" )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = token_index
writer.write(" ".join(_A ) + "\n" )
index += 1
return vocab_file, merge_file
def __UpperCamelCase ( self : Optional[Any],_A : List[int],_A : Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : str = [self.cls_token_id]
SCREAMING_SNAKE_CASE_ : Tuple = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
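# Resulting layouts (RoBERTa/Longformer convention): a single sequence becomes
# <s> A </s>, and a pair becomes <s> A </s></s> B </s> -- note the doubled
# separator between the two segments.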
def __UpperCamelCase ( self : str,_A : List[int],_A : Optional[List[int]] = None,_A : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A,token_ids_a=_A,already_has_special_tokens=_A )
if token_ids_a is None:
return [1] + ([0] * len(_A )) + [1]
return [1] + ([0] * len(_A )) + [1, 1] + ([0] * len(_A )) + [1]
def __UpperCamelCase ( self : Any,_A : List[int],_A : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCamelCase ( self : Any,_A : Union[str, Any],_A : Any=False,**_A : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = kwargs.pop("add_prefix_space",self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_A ) > 0 and not text[0].isspace()):
SCREAMING_SNAKE_CASE_ : str = " " + text
return (text, kwargs)
| 18 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
__lowerCamelCase : Any = logging.get_logger(__name__)
__lowerCamelCase : Optional[Any] = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__lowerCamelCase : Optional[Any] = {
'''vocab_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'''
),
},
'''tokenizer_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''',
'''roberta-base-openai-detector''': (
'''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'''
),
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'''
),
},
}
__lowerCamelCase : Tuple = {
'''roberta-base''': 5_12,
'''roberta-large''': 5_12,
'''roberta-large-mnli''': 5_12,
'''distilroberta-base''': 5_12,
'''roberta-base-openai-detector''': 5_12,
'''roberta-large-openai-detector''': 5_12,
}
class a__ ( A__ ):
A = VOCAB_FILES_NAMES
A = PRETRAINED_VOCAB_FILES_MAP
A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A = ['input_ids', 'attention_mask']
A = RobertaTokenizer
def __init__( self : Optional[int],_A : str=None,_A : Any=None,_A : Tuple=None,_A : Optional[Any]="replace",_A : int="<s>",_A : int="</s>",_A : Tuple="</s>",_A : Optional[int]="<s>",_A : List[Any]="<unk>",_A : Optional[Any]="<pad>",_A : Dict="<mask>",_A : List[str]=False,_A : Optional[Any]=True,**_A : int,):
"""simple docstring"""
super().__init__(
_A,_A,tokenizer_file=_A,errors=_A,bos_token=_A,eos_token=_A,sep_token=_A,cls_token=_A,unk_token=_A,pad_token=_A,mask_token=_A,add_prefix_space=_A,trim_offsets=_A,**_A,)
SCREAMING_SNAKE_CASE_ : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space",_A ) != add_prefix_space:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = getattr(_A,pre_tok_state.pop("type" ) )
SCREAMING_SNAKE_CASE_ : Optional[int] = add_prefix_space
SCREAMING_SNAKE_CASE_ : Optional[int] = pre_tok_class(**_A )
SCREAMING_SNAKE_CASE_ : List[Any] = add_prefix_space
SCREAMING_SNAKE_CASE_ : Optional[int] = "post_processor"
SCREAMING_SNAKE_CASE_ : Optional[int] = getattr(self.backend_tokenizer,_A,_A )
if tokenizer_component_instance:
SCREAMING_SNAKE_CASE_ : Dict = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
SCREAMING_SNAKE_CASE_ : Tuple = tuple(state["sep"] )
if "cls" in state:
SCREAMING_SNAKE_CASE_ : Optional[Any] = tuple(state["cls"] )
SCREAMING_SNAKE_CASE_ : Optional[int] = False
if state.get("add_prefix_space",_A ) != add_prefix_space:
SCREAMING_SNAKE_CASE_ : List[Any] = add_prefix_space
SCREAMING_SNAKE_CASE_ : int = True
if state.get("trim_offsets",_A ) != trim_offsets:
SCREAMING_SNAKE_CASE_ : List[Any] = trim_offsets
SCREAMING_SNAKE_CASE_ : List[Any] = True
if changes_to_apply:
SCREAMING_SNAKE_CASE_ : int = getattr(_A,state.pop("type" ) )
SCREAMING_SNAKE_CASE_ : str = component_class(**_A )
setattr(self.backend_tokenizer,_A,_A )
@property
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def __UpperCamelCase ( self : Tuple,_A : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else value
SCREAMING_SNAKE_CASE_ : Union[str, Any] = value
def __UpperCamelCase ( self : Union[str, Any],*_A : int,**_A : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = kwargs.get("is_split_into_words",_A )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*_A,**_A )
def __UpperCamelCase ( self : Optional[int],*_A : int,**_A : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = kwargs.get("is_split_into_words",_A )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*_A,**_A )
def __UpperCamelCase ( self : int,_A : str,_A : Optional[str] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self._tokenizer.model.save(_A,name=_A )
return tuple(_A )
def __UpperCamelCase ( self : Dict,_A : Optional[int],_A : List[Any]=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __UpperCamelCase ( self : str,_A : List[int],_A : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 18 |
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class a__ :
def __init__( self : Optional[int],_A : Dict,_A : List[str]=13,_A : List[str]=7,_A : int=True,_A : str=True,_A : Union[str, Any]=True,_A : Tuple=True,_A : Dict=99,_A : Tuple=32,_A : Tuple=2,_A : Tuple=4,_A : Optional[Any]=37,_A : str="gelu",_A : Dict=0.1,_A : List[Any]=0.1,_A : List[str]=512,_A : str=16,_A : int=2,_A : Dict=0.02,_A : List[Any]=3,_A : Optional[Any]=4,_A : Optional[int]=None,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = parent
SCREAMING_SNAKE_CASE_ : Any = 13
SCREAMING_SNAKE_CASE_ : List[str] = 7
SCREAMING_SNAKE_CASE_ : Dict = True
SCREAMING_SNAKE_CASE_ : Optional[Any] = True
SCREAMING_SNAKE_CASE_ : Tuple = True
SCREAMING_SNAKE_CASE_ : List[str] = True
SCREAMING_SNAKE_CASE_ : List[str] = 99
SCREAMING_SNAKE_CASE_ : Tuple = 384
SCREAMING_SNAKE_CASE_ : Optional[Any] = 2
SCREAMING_SNAKE_CASE_ : Any = 4
SCREAMING_SNAKE_CASE_ : str = 37
SCREAMING_SNAKE_CASE_ : Optional[Any] = "gelu"
SCREAMING_SNAKE_CASE_ : List[Any] = 0.1
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0.1
SCREAMING_SNAKE_CASE_ : Dict = 512
SCREAMING_SNAKE_CASE_ : int = 16
SCREAMING_SNAKE_CASE_ : Optional[int] = 2
SCREAMING_SNAKE_CASE_ : Any = 0.02
SCREAMING_SNAKE_CASE_ : str = 3
SCREAMING_SNAKE_CASE_ : int = 4
SCREAMING_SNAKE_CASE_ : Dict = 128
SCREAMING_SNAKE_CASE_ : Any = 2
SCREAMING_SNAKE_CASE_ : Tuple = 9
SCREAMING_SNAKE_CASE_ : List[Any] = 1
SCREAMING_SNAKE_CASE_ : Any = None
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
SCREAMING_SNAKE_CASE_ : Any = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ : List[str] = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE_ : List[Any] = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
SCREAMING_SNAKE_CASE_ : Dict = None
SCREAMING_SNAKE_CASE_ : Dict = None
SCREAMING_SNAKE_CASE_ : str = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size],self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([self.batch_size, self.seq_length],self.num_labels )
SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size],self.num_choices )
SCREAMING_SNAKE_CASE_ : Any = ConvBertConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,initializer_range=self.initializer_range,return_dict=_A,)
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase ( self : Optional[int],_A : List[Any],_A : int,_A : Tuple,_A : Optional[int],_A : Union[str, Any],_A : Union[str, Any],_A : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = TFConvBertModel(config=_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
SCREAMING_SNAKE_CASE_ : str = [input_ids, input_mask]
SCREAMING_SNAKE_CASE_ : List[str] = model(_A )
SCREAMING_SNAKE_CASE_ : Dict = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self : Dict,_A : Dict,_A : int,_A : Union[str, Any],_A : List[Any],_A : int,_A : str,_A : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = TFConvBertForMaskedLM(config=_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE_ : List[Any] = model(_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self : Any,_A : Optional[int],_A : List[Any],_A : Union[str, Any],_A : List[Any],_A : Union[str, Any],_A : Optional[int],_A : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self.num_labels
SCREAMING_SNAKE_CASE_ : Any = TFConvBertForSequenceClassification(config=_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : int,_A : int,_A : Dict,_A : List[str],_A : Tuple,_A : Dict,_A : Optional[int],_A : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.num_choices
SCREAMING_SNAKE_CASE_ : Optional[int] = TFConvBertForMultipleChoice(config=_A )
SCREAMING_SNAKE_CASE_ : Any = tf.tile(tf.expand_dims(_A,1 ),(1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE_ : Any = tf.tile(tf.expand_dims(_A,1 ),(1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.tile(tf.expand_dims(_A,1 ),(1, self.num_choices, 1) )
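# Shape note: each (batch_size, seq_length) input is expanded to
# (batch_size, 1, seq_length) and tiled to (batch_size, num_choices, seq_length),
# so every choice sees its own copy of the encoded inputs.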
SCREAMING_SNAKE_CASE_ : int = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
SCREAMING_SNAKE_CASE_ : int = model(_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_choices) )
def __UpperCamelCase ( self : List[Any],_A : Union[str, Any],_A : int,_A : Optional[int],_A : str,_A : str,_A : Tuple,_A : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.num_labels
SCREAMING_SNAKE_CASE_ : Union[str, Any] = TFConvBertForTokenClassification(config=_A )
SCREAMING_SNAKE_CASE_ : Tuple = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE_ : str = model(_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase ( self : List[Any],_A : int,_A : List[str],_A : List[Any],_A : Any,_A : Optional[int],_A : List[str],_A : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = TFConvBertForQuestionAnswering(config=_A )
SCREAMING_SNAKE_CASE_ : Dict = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE_ : Any = model(_A )
self.parent.assertEqual(result.start_logits.shape,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape,(self.batch_size, self.seq_length) )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = config_and_inputs
SCREAMING_SNAKE_CASE_ : Any = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class a__ ( A__ , A__ , unittest.TestCase ):
A = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
A = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
A = False
A = False
A = False
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = TFConvBertModelTester(self )
SCREAMING_SNAKE_CASE_ : Tuple = ConfigTester(self,config_class=_A,hidden_size=37 )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_A )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_A )
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_A )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_A )
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_A )
@slow
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : List[str] = True
SCREAMING_SNAKE_CASE_ : Any = True
if hasattr(_A,"use_cache" ):
SCREAMING_SNAKE_CASE_ : List[Any] = True
SCREAMING_SNAKE_CASE_ : int = getattr(self.model_tester,"encoder_seq_length",self.model_tester.seq_length )
SCREAMING_SNAKE_CASE_ : Optional[Any] = getattr(self.model_tester,"key_length",_A )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : List[str] = self._prepare_for_class(_A,_A )
SCREAMING_SNAKE_CASE_ : List[Any] = model_class(_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = len(model(_A ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_A,saved_model=_A )
SCREAMING_SNAKE_CASE_ : Optional[Any] = os.path.join(_A,"saved_model","1" )
SCREAMING_SNAKE_CASE_ : Tuple = tf.keras.models.load_model(_A )
SCREAMING_SNAKE_CASE_ : str = model(_A )
if self.is_encoder_decoder:
SCREAMING_SNAKE_CASE_ : Optional[Any] = outputs["encoder_hidden_states"]
SCREAMING_SNAKE_CASE_ : str = outputs["encoder_attentions"]
else:
SCREAMING_SNAKE_CASE_ : Any = outputs["hidden_states"]
SCREAMING_SNAKE_CASE_ : List[str] = outputs["attentions"]
self.assertEqual(len(_A ),_A )
SCREAMING_SNAKE_CASE_ : Any = getattr(
self.model_tester,"expected_num_hidden_layers",self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_A ),_A )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ),[self.model_tester.seq_length, self.model_tester.hidden_size],)
self.assertEqual(len(_A ),self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],)
@slow
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
self.assertIsNotNone(_A )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : List[str] = True
SCREAMING_SNAKE_CASE_ : List[str] = getattr(self.model_tester,"decoder_seq_length",self.model_tester.seq_length )
SCREAMING_SNAKE_CASE_ : Any = getattr(self.model_tester,"encoder_seq_length",self.model_tester.seq_length )
SCREAMING_SNAKE_CASE_ : Optional[int] = getattr(self.model_tester,"key_length",_A )
SCREAMING_SNAKE_CASE_ : int = getattr(self.model_tester,"key_length",_A )
def check_decoder_attentions_output(_A : Dict ):
SCREAMING_SNAKE_CASE_ : int = len(_A )
self.assertEqual(out_len % 2,0 )
SCREAMING_SNAKE_CASE_ : Tuple = outputs.decoder_attentions
self.assertEqual(len(_A ),self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],)
def check_encoder_attentions_output(_A : Tuple ):
SCREAMING_SNAKE_CASE_ : int = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(_A ),self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],)
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : Optional[Any] = True
SCREAMING_SNAKE_CASE_ : Optional[Any] = False
SCREAMING_SNAKE_CASE_ : Tuple = model_class(_A )
SCREAMING_SNAKE_CASE_ : Any = model(self._prepare_for_class(_A,_A ) )
SCREAMING_SNAKE_CASE_ : Tuple = len(_A )
self.assertEqual(config.output_hidden_states,_A )
check_encoder_attentions_output(_A )
if self.is_encoder_decoder:
SCREAMING_SNAKE_CASE_ : Optional[Any] = model_class(_A )
SCREAMING_SNAKE_CASE_ : int = model(self._prepare_for_class(_A,_A ) )
self.assertEqual(config.output_hidden_states,_A )
check_decoder_attentions_output(_A )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE_ : str = True
SCREAMING_SNAKE_CASE_ : int = model_class(_A )
SCREAMING_SNAKE_CASE_ : List[str] = model(self._prepare_for_class(_A,_A ) )
self.assertEqual(config.output_hidden_states,_A )
check_encoder_attentions_output(_A )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE_ : str = True
SCREAMING_SNAKE_CASE_ : int = True
SCREAMING_SNAKE_CASE_ : Dict = model_class(_A )
SCREAMING_SNAKE_CASE_ : str = model(self._prepare_for_class(_A,_A ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1),len(_A ) )
self.assertEqual(model.config.output_hidden_states,_A )
check_encoder_attentions_output(_A )
@require_tf
class a__ ( unittest.TestCase ):
@slow
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
SCREAMING_SNAKE_CASE_ : int = tf.constant([[0, 1, 2, 3, 4, 5]] )
SCREAMING_SNAKE_CASE_ : Tuple = model(_A )[0]
SCREAMING_SNAKE_CASE_ : List[Any] = [1, 6, 768]
self.assertEqual(output.shape,_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.constant(
[
[
[-0.03475493, -0.4686034, -0.30638832],
[0.22637248, -0.26988646, -0.7423424],
[0.10324868, -0.45013508, -0.58280784],
]
] )
tf.debugging.assert_near(output[:, :3, :3],_A,atol=1E-4 )
| 18 | 1 |
import re
def _snake_case ( lowerCAmelCase : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = re.compile(
R"^(?:0|94|\+94|0{2}94)" R"7(0|1|2|4|5|6|7|8)" R"(-| |)" R"\d{7}$" )
return bool(re.search(lowerCAmelCase , lowerCAmelCase ) )
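# Accepted shapes (per the pattern above): a prefix of 0 / 94 / +94 / 0094, the
# network digit 7, one of 0,1,2,4,5,6,7,8, an optional "-" or " " separator, then
# exactly seven digits -- e.g. 0712345678, 071-2345678, +94712345678, 0094702343221.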
if __name__ == "__main__":
__lowerCamelCase : Optional[int] = '''0094702343221'''
print(is_sri_lankan_phone_number(phone))
| 18 |
def _snake_case ( lowerCAmelCase : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = int(lowerCAmelCase )
if decimal in (0, 1): # Exit cases for the recursion
return str(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = divmod(lowerCAmelCase , 2 )
return binary_recursive(div ) + str(mod )  # recurse on the quotient, append the remainder bit
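# Worked trace: binary_recursive(10) -> divmod(10, 2) = (5, 0), then (2, 1),
# then (1, 0), and the base case "1", concatenating to "1" + "0" + "1" + "0" = "1010".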
def _snake_case ( lowerCAmelCase : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = str(lowerCAmelCase ).strip()
if not number:
raise ValueError("No input value was provided" )
SCREAMING_SNAKE_CASE_ : List[str] = "-" if number.startswith("-" ) else ""
SCREAMING_SNAKE_CASE_ : Optional[Any] = number.lstrip("-" )
if not number.isnumeric():
raise ValueError("Input value is not an integer" )
return f'{negative}0b{binary_recursive(int(lowerCAmelCase ) )}'
if __name__ == "__main__":
from doctest import testmod
testmod()
| 18 | 1 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class a__ :
def __init__( self : List[Any],_A : Dict,_A : List[Any]=3,_A : Optional[int]=32,_A : str=3,_A : Optional[int]=10,_A : int=[10, 20, 30, 40],_A : str=[1, 1, 2, 1],_A : Tuple=True,_A : List[Any]=True,_A : int="relu",_A : List[Any]=3,_A : Dict=None,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = parent
SCREAMING_SNAKE_CASE_ : Tuple = batch_size
SCREAMING_SNAKE_CASE_ : Optional[int] = image_size
SCREAMING_SNAKE_CASE_ : int = num_channels
SCREAMING_SNAKE_CASE_ : Optional[Any] = embeddings_size
SCREAMING_SNAKE_CASE_ : int = hidden_sizes
SCREAMING_SNAKE_CASE_ : Dict = depths
SCREAMING_SNAKE_CASE_ : int = is_training
SCREAMING_SNAKE_CASE_ : Union[str, Any] = use_labels
SCREAMING_SNAKE_CASE_ : Dict = hidden_act
SCREAMING_SNAKE_CASE_ : Tuple = num_labels
SCREAMING_SNAKE_CASE_ : int = scope
SCREAMING_SNAKE_CASE_ : Optional[Any] = len(_A )
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_ : Dict = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : Any = ids_tensor([self.batch_size],self.num_labels )
SCREAMING_SNAKE_CASE_ : Dict = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
return ResNetConfig(
num_channels=self.num_channels,embeddings_size=self.embeddings_size,hidden_sizes=self.hidden_sizes,depths=self.depths,hidden_act=self.hidden_act,num_labels=self.num_labels,image_size=self.image_size,)
def __UpperCamelCase ( self : Optional[Any],_A : int,_A : Tuple,_A : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = TFResNetModel(config=_A )
SCREAMING_SNAKE_CASE_ : str = model(_A )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),)
def __UpperCamelCase ( self : Dict,_A : int,_A : Optional[Any],_A : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = self.num_labels
SCREAMING_SNAKE_CASE_ : str = TFResNetForImageClassification(_A )
SCREAMING_SNAKE_CASE_ : List[str] = model(_A,labels=_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = config_and_inputs
SCREAMING_SNAKE_CASE_ : Dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class a__ ( A__ , A__ , unittest.TestCase ):
A = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
A = (
{'feature-extraction': TFResNetModel, 'image-classification': TFResNetForImageClassification}
if is_tf_available()
else {}
)
A = False
A = False
A = False
A = False
A = False
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = TFResNetModelTester(self )
SCREAMING_SNAKE_CASE_ : List[Any] = ConfigTester(self,config_class=_A,has_text_modality=_A )
def __UpperCamelCase ( self : str ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
return
@unittest.skip(reason="ResNet does not use inputs_embeds" )
def __UpperCamelCase ( self : int ):
"""simple docstring"""
pass
@unittest.skip(reason="ResNet does not support input and output embeddings" )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
pass
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : Tuple = model_class(_A )
SCREAMING_SNAKE_CASE_ : Any = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_ : List[Any] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_ : List[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1],_A )
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
def check_hidden_states_output(_A : int,_A : Tuple,_A : str ):
SCREAMING_SNAKE_CASE_ : List[str] = model_class(_A )
SCREAMING_SNAKE_CASE_ : Dict = model(**self._prepare_for_class(_A,_A ) )
SCREAMING_SNAKE_CASE_ : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.num_stages
self.assertEqual(len(_A ),expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ),[self.model_tester.image_size // 4, self.model_tester.image_size // 4],)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : List[str] = ["basic", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
SCREAMING_SNAKE_CASE_ : List[str] = layer_type
SCREAMING_SNAKE_CASE_ : str = True
check_hidden_states_output(_A,_A,_A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE_ : Tuple = True
check_hidden_states_output(_A,_A,_A )
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@slow
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ : List[str] = TFResNetModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def _snake_case ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class a__ ( unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
SCREAMING_SNAKE_CASE_ : int = self.default_image_processor
SCREAMING_SNAKE_CASE_ : Dict = prepare_img()
SCREAMING_SNAKE_CASE_ : int = image_processor(images=_A,return_tensors="tf" )
# forward pass
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(**_A )
# verify the logits
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape,_A )
SCREAMING_SNAKE_CASE_ : Tuple = tf.constant([-11.1069, -9.7877, -8.3777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(),_A,atol=1E-4 ) )
| 18 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__lowerCamelCase : Union[str, Any] = {
'''configuration_chinese_clip''': [
'''CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ChineseCLIPConfig''',
'''ChineseCLIPOnnxConfig''',
'''ChineseCLIPTextConfig''',
'''ChineseCLIPVisionConfig''',
],
'''processing_chinese_clip''': ['''ChineseCLIPProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Any = ['''ChineseCLIPFeatureExtractor''']
__lowerCamelCase : Optional[int] = ['''ChineseCLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : int = [
'''CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ChineseCLIPModel''',
'''ChineseCLIPPreTrainedModel''',
'''ChineseCLIPTextModel''',
'''ChineseCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
__lowerCamelCase : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
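# A minimal sketch of the lazy-import idea used above (an illustrative assumption,
# not the actual _LazyModule implementation): PEP 562 lets a module define
# __getattr__ so that a heavy submodule is only imported when one of its names is
# first accessed.
import importlib
def __getattr__(name ):  # hypothetical fallback resolver
    return getattr(importlib.import_module(".modeling_chinese_clip",__name__ ),name )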
| 18 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class a__ ( A__ , A__ , A__ , unittest.TestCase ):
A = StableUnCLIPImgaImgPipeline
A = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
A = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
A = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
A = frozenset([] )
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = 32
SCREAMING_SNAKE_CASE_ : Dict = embedder_hidden_size
# image encoding components
SCREAMING_SNAKE_CASE_ : List[Any] = CLIPImageProcessor(crop_size=32,size=32 )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : List[str] = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=_A,projection_dim=_A,num_hidden_layers=5,num_attention_heads=4,image_size=32,intermediate_size=37,patch_size=1,) )
# regular denoising components
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : List[str] = StableUnCLIPImageNormalizer(embedding_dim=_A )
SCREAMING_SNAKE_CASE_ : List[str] = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Dict = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : List[str] = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0,eos_token_id=2,hidden_size=_A,projection_dim=32,intermediate_size=37,layer_norm_eps=1E-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=1000,) )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Tuple = UNetaDConditionModel(
sample_size=32,in_channels=4,out_channels=4,down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),block_out_channels=(32, 64),attention_head_dim=(2, 4),class_embed_type="projection",projection_class_embeddings_input_dim=embedder_projection_dim * 2,cross_attention_dim=_A,layers_per_block=1,upcast_attention=_A,use_linear_projection=_A,)
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Tuple = DDIMScheduler(
beta_schedule="scaled_linear",beta_start=0.00085,beta_end=0.012,prediction_type="v_prediction",set_alpha_to_one=_A,steps_offset=1,)
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Any = AutoencoderKL()
SCREAMING_SNAKE_CASE_ : str = {
# image encoding components
"feature_extractor": feature_extractor,
"image_encoder": image_encoder.eval(),
# image noising components
"image_normalizer": image_normalizer.eval(),
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder.eval(),
"unet": unet.eval(),
"scheduler": scheduler,
"vae": vae.eval(),
}
return components
def __UpperCamelCase ( self : int,_A : str,_A : Optional[int]=0,_A : Union[str, Any]=True ):
"""simple docstring"""
if str(_A ).startswith("mps" ):
SCREAMING_SNAKE_CASE_ : int = torch.manual_seed(_A )
else:
SCREAMING_SNAKE_CASE_ : int = torch.Generator(device=_A ).manual_seed(_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = floats_tensor((1, 3, 32, 32),rng=random.Random(_A ) ).to(_A )
if pil_image:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = input_image * 0.5 + 0.5
SCREAMING_SNAKE_CASE_ : Union[str, Any] = input_image.clamp(0,1 )
SCREAMING_SNAKE_CASE_ : int = input_image.cpu().permute(0,2,3,1 ).float().numpy()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = DiffusionPipeline.numpy_to_pil(_A )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE_ : List[str] = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : Dict = StableUnCLIPImgaImgPipeline(**_A )
SCREAMING_SNAKE_CASE_ : Optional[Any] = sd_pipe.to(_A )
sd_pipe.set_progress_bar_config(disable=_A )
SCREAMING_SNAKE_CASE_ : List[str] = self.get_dummy_inputs(_A )
inputs.update({"image_embeds": None} )
SCREAMING_SNAKE_CASE_ : Dict = sd_pipe(**_A ).images
SCREAMING_SNAKE_CASE_ : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = torch_device in ["cpu", "mps"]
self._test_attention_slicing_forward_pass(test_max_difference=_A )
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=_A )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available(),reason="XFormers attention is only available with CUDA and `xformers` installed",)
def __UpperCamelCase ( self : int ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=_A )
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
def __UpperCamelCase ( self : str ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
SCREAMING_SNAKE_CASE_ : Dict = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" )
SCREAMING_SNAKE_CASE_ : Optional[Any] = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-l-img2img",torch_dtype=torch.floataa )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE_ : Tuple = torch.Generator(device="cpu" ).manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Optional[int] = pipe(_A,"anime turle",generator=_A,output_type="np" )
SCREAMING_SNAKE_CASE_ : Any = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_A,_A )
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
SCREAMING_SNAKE_CASE_ : Tuple = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" )
SCREAMING_SNAKE_CASE_ : Tuple = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img",torch_dtype=torch.floataa )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE_ : Dict = torch.Generator(device="cpu" ).manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Optional[int] = pipe(_A,"anime turle",generator=_A,output_type="np" )
SCREAMING_SNAKE_CASE_ : Optional[int] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_A,_A )
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
SCREAMING_SNAKE_CASE_ : int = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img",torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE_ : int = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE_ : Optional[int] = pipe(
_A,"anime turtle",num_inference_steps=2,output_type="np",)
SCREAMING_SNAKE_CASE_ : List[Any] = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 18 | import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
__lowerCamelCase : Any = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def _snake_case ( lowerCAmelCase : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = list(s_dict.keys() )
for key in keys:
SCREAMING_SNAKE_CASE_ : int = R".*/layers_(\d+)"
SCREAMING_SNAKE_CASE_ : List[Any] = key
if re.match(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Any = re.sub(R"layers_(\d+)" , R"block/\1/layer" , lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[Any] = R"(encoder|decoder)\/"
if re.match(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : str = re.match(lowerCAmelCase , lowerCAmelCase ).groups()
if groups[0] == "encoder":
SCREAMING_SNAKE_CASE_ : Any = re.sub(R"/mlp/" , R"/1/mlp/" , lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[Any] = re.sub(R"/pre_mlp_layer_norm/" , R"/1/layer_norm/" , lowerCAmelCase )
elif groups[0] == "decoder":
SCREAMING_SNAKE_CASE_ : List[str] = re.sub(R"/mlp/" , R"/2/mlp/" , lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Dict = re.sub(R"/pre_mlp_layer_norm/" , R"/2/layer_norm/" , lowerCAmelCase )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
SCREAMING_SNAKE_CASE_ : List[Any] = new_key.replace(lowerCAmelCase , lowerCAmelCase )
print(f'{key} -> {new_key}' )
SCREAMING_SNAKE_CASE_ : List[Any] = s_dict.pop(lowerCAmelCase )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
SCREAMING_SNAKE_CASE_ : str = s_dict[
"encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
SCREAMING_SNAKE_CASE_ : Optional[int] = s_dict[
"decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = s_dict[key].shape[0]
SCREAMING_SNAKE_CASE_ : List[Any] = s_dict[key]
for idx in range(lowerCAmelCase ):
 SCREAMING_SNAKE_CASE_ : Tuple = expert_weights[idx]
 print(f'{key} -> {key.replace("expert/" , f"experts/expert_{idx}/" )}' )
s_dict.pop(lowerCAmelCase )
return s_dict
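# Worked example of the renaming above (hypothetical T5X key, for illustration):
#     "encoder/layers_3/attention/query/kernel"
# ->  "encoder/block/3/layer/0/SelfAttention/q/kernel"
# via the layers_(\d+) regex plus the MOE_LAYER_NAME_MAPPING substitutions
# ("/attention/" -> "/0/SelfAttention/", "query" -> "q").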
__lowerCamelCase : List[Any] = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def _snake_case ( lowerCAmelCase : Dict , lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
import regex as re
with open(lowerCAmelCase , "r" ) as f:
SCREAMING_SNAKE_CASE_ : Optional[Any] = f.read()
SCREAMING_SNAKE_CASE_ : List[str] = re.findall(R"(.*) = ([0-9.]*)" , lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Any = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
SCREAMING_SNAKE_CASE_ : int = float(lowerCAmelCase ) if "." in value else int(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = re.findall(R"(.*activations) = \(\'(.*)\',\)" , lowerCAmelCase )[0]
SCREAMING_SNAKE_CASE_ : List[str] = str(activation[1] )
SCREAMING_SNAKE_CASE_ : str = num_experts
SCREAMING_SNAKE_CASE_ : Tuple = SwitchTransformersConfig(**lowerCAmelCase )
return config
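# Sketch of the gin parsing above (example lines, format assumed from the regexes):
#     NUM_ENCODER_LAYERS = 12                   -> num_layers=12
#     MLP_DIM = 3072                            -> d_ff=3072
#     dense.MlpBlock.activations = ('gelu',)    -> feed_forward_proj="gelu"
# Values containing a "." are parsed as floats, all others as ints.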
def _snake_case ( lowerCAmelCase : str , lowerCAmelCase : str , lowerCAmelCase : str=None , lowerCAmelCase : Optional[Any]="./" , lowerCAmelCase : Dict=8 ):
"""simple docstring"""
print(f'Loading flax weights from : {flax_checkpoint_path}' )
SCREAMING_SNAKE_CASE_ : int = checkpoints.load_tax_checkpoint(lowerCAmelCase )
if gin_file is not None:
SCREAMING_SNAKE_CASE_ : int = convert_gin_to_config(lowerCAmelCase , lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE_ : Dict = SwitchTransformersConfig.from_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : str = SwitchTransformersForConditionalGeneration(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = flax_params["target"]
SCREAMING_SNAKE_CASE_ : List[str] = flatten_dict(lowerCAmelCase , sep="/" )
SCREAMING_SNAKE_CASE_ : List[str] = rename_keys(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = unflatten_dict(lowerCAmelCase , sep="/" )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(lowerCAmelCase , lowerCAmelCase )
print(f'Save PyTorch model to {pytorch_dump_path}' )
pt_model.save_pretrained(lowerCAmelCase )
if __name__ == "__main__":
__lowerCamelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
 '''Path to the T5X checkpoint to convert. \nIf a `gin_file` is not provided, a `config_name` has to be'''
 ''' passed to build the model configuration.'''
),
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
__lowerCamelCase : Any = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
 args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
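 # Example invocation (script name and paths are hypothetical; the flags match
 # the parser defined above):
 #   python convert_switch_transformers_checkpoint.py \
 #       --switch_t5x_checkpoint_path /tmp/t5x/switch_base_8/checkpoint_500000 \
 #       --gin_file /tmp/t5x/switch_base_8/config.gin \
 #       --pytorch_dump_folder_path ./switch-base-8 \
 #       --num_experts 8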
| 18 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def _snake_case ( lowerCAmelCase : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = [tensor.shape for tensor in tensor_list]
return all(shape == shapes[0] for shape in shapes[1:] )
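# Illustrative behaviour of the helper above:
#     _snake_case([torch.zeros(1, 3), torch.ones(1, 3)])   # -> True  (shapes match)
#     _snake_case([torch.zeros(1, 3), torch.ones(2, 3)])   # -> False (shapes differ)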
class a__ ( A__ , A__ , A__ , unittest.TestCase ):
A = StableDiffusionLatentUpscalePipeline
A = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'height',
'width',
'cross_attention_kwargs',
'negative_prompt_embeds',
'prompt_embeds',
}
A = PipelineTesterMixin.required_optional_params - {'num_images_per_prompt'}
A = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
A = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
A = frozenset([] )
A = True
@property
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = 1
SCREAMING_SNAKE_CASE_ : Optional[int] = 4
SCREAMING_SNAKE_CASE_ : Optional[int] = (16, 16)
SCREAMING_SNAKE_CASE_ : Dict = floats_tensor((batch_size, num_channels) + sizes,rng=random.Random(0 ) ).to(_A )
return image
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : str = UNetaDConditionModel(
act_fn="gelu",attention_head_dim=8,norm_num_groups=_A,block_out_channels=[32, 32, 64, 64],time_cond_proj_dim=160,conv_in_kernel=1,conv_out_kernel=1,cross_attention_dim=32,down_block_types=(
"KDownBlock2D",
"KCrossAttnDownBlock2D",
"KCrossAttnDownBlock2D",
"KCrossAttnDownBlock2D",
),in_channels=8,mid_block_type=_A,only_cross_attention=_A,out_channels=5,resnet_time_scale_shift="scale_shift",time_embedding_type="fourier",timestep_post_act="gelu",up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"),)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 32, 64, 64],in_channels=3,out_channels=3,down_block_types=[
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
],up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],latent_channels=4,)
SCREAMING_SNAKE_CASE_ : int = EulerDiscreteScheduler(prediction_type="sample" )
SCREAMING_SNAKE_CASE_ : List[Any] = CLIPTextConfig(
bos_token_id=0,eos_token_id=2,hidden_size=32,intermediate_size=37,layer_norm_eps=1E-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=1000,hidden_act="quick_gelu",projection_dim=512,)
SCREAMING_SNAKE_CASE_ : Tuple = CLIPTextModel(_A )
SCREAMING_SNAKE_CASE_ : int = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {
"unet": model.eval(),
"vae": vae.eval(),
"scheduler": scheduler,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def __UpperCamelCase ( self : List[Any],_A : int,_A : Tuple=0 ):
"""simple docstring"""
if str(_A ).startswith("mps" ):
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.manual_seed(_A )
else:
SCREAMING_SNAKE_CASE_ : Dict = torch.Generator(device=_A ).manual_seed(_A )
SCREAMING_SNAKE_CASE_ : Tuple = {
"prompt": "A painting of a squirrel eating a burger",
"image": self.dummy_image.cpu(),
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = "cpu"
SCREAMING_SNAKE_CASE_ : List[str] = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : List[str] = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
SCREAMING_SNAKE_CASE_ : Tuple = self.get_dummy_inputs(_A )
SCREAMING_SNAKE_CASE_ : Dict = pipe(**_A ).images
SCREAMING_SNAKE_CASE_ : Optional[int] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape,(1, 256, 256, 3) )
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.array(
[0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055] )
SCREAMING_SNAKE_CASE_ : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_A,1E-3 )
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
super().test_attention_slicing_forward_pass(expected_max_diff=7E-3 )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
super().test_cpu_offload_forward_pass(expected_max_diff=3E-3 )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def __UpperCamelCase ( self : int ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=7E-3 )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3E-3 )
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
super().test_save_load_local(expected_max_difference=3E-3 )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
super().test_save_load_optional_components(expected_max_difference=3E-3 )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = [
"DDIMScheduler",
"DDPMScheduler",
"PNDMScheduler",
"HeunDiscreteScheduler",
"EulerAncestralDiscreteScheduler",
"KDPM2DiscreteScheduler",
"KDPM2AncestralDiscreteScheduler",
"DPMSolverSDEScheduler",
]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : Any = self.pipeline_class(**_A )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
SCREAMING_SNAKE_CASE_ : Any = self.get_dummy_inputs(_A )
SCREAMING_SNAKE_CASE_ : Optional[Any] = 2
SCREAMING_SNAKE_CASE_ : Optional[int] = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
 # these schedulers are skipped because they don't support the
 # sigma schedule this pipeline relies on
continue
SCREAMING_SNAKE_CASE_ : Tuple = getattr(_A,scheduler_enum.name )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = scheduler_cls.from_config(pipe.scheduler.config )
SCREAMING_SNAKE_CASE_ : Optional[int] = pipe(**_A )[0]
outputs.append(_A )
assert check_same_shape(_A )
@require_torch_gpu
@slow
class a__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = torch.manual_seed(33 )
SCREAMING_SNAKE_CASE_ : Optional[int] = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4",torch_dtype=torch.floataa )
pipe.to("cuda" )
SCREAMING_SNAKE_CASE_ : List[Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
"stabilityai/sd-x2-latent-upscaler",torch_dtype=torch.floataa )
upscaler.to("cuda" )
SCREAMING_SNAKE_CASE_ : Tuple = "a photo of an astronaut high resolution, unreal engine, ultra realistic"
SCREAMING_SNAKE_CASE_ : str = pipe(_A,generator=_A,output_type="latent" ).images
SCREAMING_SNAKE_CASE_ : Optional[Any] = upscaler(
prompt=_A,image=_A,num_inference_steps=20,guidance_scale=0,generator=_A,output_type="np",).images[0]
SCREAMING_SNAKE_CASE_ : str = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy" )
assert np.abs((expected_image - image).mean() ) < 5E-2
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = torch.manual_seed(33 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
"stabilityai/sd-x2-latent-upscaler",torch_dtype=torch.floataa )
upscaler.to("cuda" )
SCREAMING_SNAKE_CASE_ : Any = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"
SCREAMING_SNAKE_CASE_ : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png" )
SCREAMING_SNAKE_CASE_ : str = upscaler(
prompt=_A,image=_A,num_inference_steps=20,guidance_scale=0,generator=_A,output_type="np",).images[0]
SCREAMING_SNAKE_CASE_ : Any = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy" )
assert np.abs((expected_image - image).max() ) < 5E-2
| 18 | from math import factorial, radians
def _snake_case ( lowerCAmelCase : float , lowerCAmelCase : int = 1_8 , lowerCAmelCase : int = 1_0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
# Converting from degrees to radians
SCREAMING_SNAKE_CASE_ : Tuple = radians(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] = angle_in_radians
SCREAMING_SNAKE_CASE_ : List[str] = 3
SCREAMING_SNAKE_CASE_ : str = -1
for _ in range(lowerCAmelCase ):
result += (b * (angle_in_radians**a)) / factorial(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Dict = -b # One positive term and the next will be negative and so on...
a += 2 # Increased by 2 for every term.
return round(lowerCAmelCase , lowerCAmelCase )
if __name__ == "__main__":
__import__('''doctest''').testmod()
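# Worked check (illustrative): the loop above accumulates the Maclaurin series
#     sin(x) = x - x**3/3! + x**5/5! - x**7/7! + ...
# so with the default accuracy the calls below converge to the exact values:
#     _snake_case(30)   # -> 0.5   (sin of 30 degrees)
#     _snake_case(90)   # -> 1.0   (sin of 90 degrees)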
| 18 | 1 |
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
__lowerCamelCase : List[Any] = logging.get_logger(__name__)
class a__ ( A__ ):
A = CLIPConfig
A = ['CLIPEncoderLayer']
def __init__( self : Union[str, Any],_A : CLIPConfig ):
"""simple docstring"""
super().__init__(_A )
SCREAMING_SNAKE_CASE_ : List[Any] = CLIPVisionModelWithProjection(config.vision_config )
SCREAMING_SNAKE_CASE_ : int = nn.Linear(config.vision_config.projection_dim,1 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = nn.Linear(config.vision_config.projection_dim,1 )
@torch.no_grad()
def __UpperCamelCase ( self : Union[str, Any],_A : int,_A : Union[str, Any],_A : Optional[Any]=0.5,_A : Optional[Any]=0.5 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.vision_model(_A )[0]
SCREAMING_SNAKE_CASE_ : Tuple = self.p_head(_A )
SCREAMING_SNAKE_CASE_ : List[str] = nsfw_detected.flatten()
SCREAMING_SNAKE_CASE_ : List[Any] = nsfw_detected > p_threshold
SCREAMING_SNAKE_CASE_ : Any = nsfw_detected.tolist()
if any(_A ):
logger.warning(
"Potential NSFW content was detected in one or more images. A black image will be returned instead."
" Try again with a different prompt and/or seed." )
for idx, nsfw_detected_ in enumerate(_A ):
if nsfw_detected_:
SCREAMING_SNAKE_CASE_ : Optional[int] = np.zeros(images[idx].shape )
SCREAMING_SNAKE_CASE_ : List[str] = self.w_head(_A )
SCREAMING_SNAKE_CASE_ : int = watermark_detected.flatten()
SCREAMING_SNAKE_CASE_ : List[str] = watermark_detected > w_threshold
SCREAMING_SNAKE_CASE_ : Optional[int] = watermark_detected.tolist()
if any(_A ):
logger.warning(
"Potential watermarked content was detected in one or more images. A black image will be returned instead."
" Try again with a different prompt and/or seed." )
for idx, watermark_detected_ in enumerate(_A ):
if watermark_detected_:
SCREAMING_SNAKE_CASE_ : Optional[int] = np.zeros(images[idx].shape )
return images, nsfw_detected, watermark_detected
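# Rough usage sketch (argument names assumed; the class above is a safety
# checker that scores CLIP image embeddings with two linear heads):
#     checker = a__(CLIPConfig())
#     images, nsfw_flags, watermark_flags = checker(clip_pixel_values, images)
# Any image whose nsfw/watermark score exceeds the 0.5 default thresholds is
# returned as an all-zero (black) array.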
| 18 | from functools import lru_cache
@lru_cache
def _snake_case ( lowerCAmelCase : int ):
"""simple docstring"""
if num < 0:
raise ValueError("Number should not be negative." )
return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
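# Doctest-style examples (illustrative):
#     _snake_case(5)   # -> 120
#     _snake_case(0)   # -> 1
#     _snake_case(-1)  # raises ValueError("Number should not be negative.")
# lru_cache memoizes results, so each factorial value is computed only once
# per process across repeated calls.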
| 18 | 1 |