| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (81-54k chars) | int64 (0-721) | string (91-41.9k chars) | int64 (0-699) | int64 (0-1) |
"""Project Euler Problem 8: https://projecteuler.net/problem=8

Find the thirteen adjacent digits in the 1000-digit number N that have
the greatest product.
"""
from functools import reduce

N = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def solution(n: str = N) -> int:
    """Return the greatest product of thirteen adjacent digits of n."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
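# Cross-check sketch (added for illustration; `_window_product` is our helper,
# not part of the original solution): a direct digit product agrees with the
# reduce-based formulation on every 13-digit window of N.
def _window_product(window: str) -> int:
    product = 1
    for ch in window:
        product *= int(ch)
    return product


assert all(
    _window_product(N[i : i + 13]) == int(reduce(lambda x, y: str(int(x) * int(y)), N[i : i + 13]))
    for i in range(len(N) - 12)
)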
"""Convert Conditional DETR checkpoints to the Hugging Face Transformers format."""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', f'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', f'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', f'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', f'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.weight''', f'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', f'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', f'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', f'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.weight''', f'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', f'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', f'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', f'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', f'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', f'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.bias''', f'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', f'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', f'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', f'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.bias''', f'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', f'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''),
('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''),
('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''),
('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''),
('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''),
('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''),
('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''),
('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''),
('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''),
('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''),
]
)
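# rename_keys now maps every original checkpoint key (left) to its Hugging Face
# counterpart (right); it is consumed by convert_conditional_detr_checkpoint() below.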
def rename_key(state_dict, old, new):
    """Move state_dict[old] to state_dict[new] in place."""
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    """Re-root the backbone keys under backbone.conv_encoder.model."""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
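# Shape illustration for the fused-projection split above (a sketch for clarity;
# the zero tensor is hypothetical, nothing is read from a real checkpoint): with
# hidden size 256, PyTorch stacks the q, k and v projections row-wise.
_fused_example = torch.zeros(3 * 256, 256)
assert _fused_example[:256, :].shape == (256, 256)  # query rows
assert _fused_example[256:512, :].shape == (256, 256)  # key rows
assert _fused_example[-256:, :].shape == (256, 256)  # value rows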
def prepare_img():
    """Download the standard COCO test image (two cats on a couch)."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    """Copy/paste/tweak the original model's weights into our Conditional DETR structure."""
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion on the test image
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''conditional_detr_resnet50''',
type=str,
help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
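# Example invocation (illustrative; the script filename below is hypothetical):
#
#   python convert_conditional_detr_checkpoint.py \
#       --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path ./conditional_detr_resnet50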
"""Tests for the AltDiffusion image-to-image pipeline."""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
    AltDiffusionImg2ImgPipeline,
    AutoencoderKL,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5006,
        )
        return RobertaSeriesModelWithTransformation(config)
    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_stable_diffusion_img2img_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", image=init_image
        )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def _A ( self :int ) -> List[str]:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.dummy_cond_unet
snake_case_ : Union[str, Any] = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
snake_case_ : int = self.dummy_vae
snake_case_ : List[Any] = self.dummy_text_encoder
snake_case_ : int = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
snake_case_ : int = 77
snake_case_ : Dict = self.dummy_image.to(lowerCAmelCase__ )
# put models in fp16
snake_case_ : Optional[Any] = unet.half()
snake_case_ : Tuple = vae.half()
snake_case_ : List[str] = bert.half()
# make sure here that pndm scheduler skips prk
snake_case_ : Optional[int] = AltDiffusionImgaImgPipeline(
unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , feature_extractor=self.dummy_extractor , )
snake_case_ : List[str] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=lowerCAmelCase__ )
snake_case_ : Optional[Any] = alt_pipe.to(lowerCAmelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
snake_case_ : List[Any] = "A painting of a squirrel eating a burger"
snake_case_ : str = torch.manual_seed(0 )
snake_case_ : Any = alt_pipe(
[prompt] , generator=lowerCAmelCase__ , num_inference_steps=2 , output_type="np" , image=lowerCAmelCase__ , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def _A ( self :Optional[int] ) -> Any:
'''simple docstring'''
snake_case_ : Union[str, Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
# resize to resolution that is divisible by 8 but not 16 or 32
snake_case_ : str = init_image.resize((760, 504) )
snake_case_ : Optional[Any] = "BAAI/AltDiffusion"
snake_case_ : int = AltDiffusionImgaImgPipeline.from_pretrained(
lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
snake_case_ : Tuple = "A fantasy landscape, trending on artstation"
snake_case_ : int = torch.manual_seed(0 )
snake_case_ : List[str] = pipe(
prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , strength=0.7_5 , guidance_scale=7.5 , generator=lowerCAmelCase__ , output_type="np" , )
snake_case_ : str = output.images[0]
snake_case_ : List[Any] = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
snake_case_ : Tuple = np.array([0.9_3_5_8, 0.9_3_9_7, 0.9_5_9_9, 0.9_9_0_1, 1.0_0_0_0, 1.0_0_0_0, 0.9_8_8_2, 1.0_0_0_0, 1.0_0_0_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class AltDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy"
        )

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, generator=generator, output_type="np"
        )
        image = output.images[0]

        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1e-2
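# Minimal usage sketch of the pipeline exercised above (an illustration using the
# same model id as the integration tests; not part of the original test module):
#
#   from diffusers import AltDiffusionImg2ImgPipeline
#   from diffusers.utils import load_image
#
#   pipe = AltDiffusionImg2ImgPipeline.from_pretrained("BAAI/AltDiffusion").to("cuda")
#   init_image = load_image(
#       "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
#       "/img2img/sketch-mountains-input.jpg"
#   ).resize((768, 512))
#   image = pipe(prompt="A fantasy landscape, trending on artstation", image=init_image).images[0]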
"""Rabin-Karp substring search based on a rolling hash."""
# Size of the alphabet, used as the base of the hash
alphabet_size = 256
# Modulus to hash a string
modulus = 1000003
def rabin_karp(pattern: str, text: str) -> bool:
    """Return True if pattern occurs in text, using a rolling hash."""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
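# Worked example of the rolling-hash update above (added for illustration; the
# helper is not part of the original module): dropping the leading character and
# appending the next one reproduces the hash computed from scratch.
def _hash_from_scratch(s: str) -> int:
    h = 0
    for ch in s:
        h = (ord(ch) + h * alphabet_size) % modulus
    return h


# For windows of length 2 over "abc", modulus_power is alphabet_size ** 1:
assert (
    (_hash_from_scratch("ab") - ord("a") * alphabet_size) * alphabet_size + ord("c")
) % modulus == _hash_from_scratch("bc")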
def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)
    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")
if __name__ == "__main__":
test_rabin_karp()
"""Tests for the zero-shot classification pipeline."""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}


@is_pipeline_test
class ZeroShotClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    def get_test_pipeline(self, model, tokenizer, processor):
        classifier = ZeroShotClassificationPipeline(
            model=model, tokenizer=tokenizer, candidate_labels=["polics", "health"]
        )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def run_pipeline_test(self, classifier, _):
        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics")
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # No kwarg
        outputs = classifier("Who are you voting for in 2020?", ["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics, public health")
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"])
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier(
            "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="This text is about {}"
        )
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(["I am happy"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(1)
            ],
        )
        outputs = classifier(["I am happy", "I am sad"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(2)
            ],
        )

        with self.assertRaises(ValueError):
            classifier("", candidate_labels="politics")
        with self.assertRaises(TypeError):
            classifier(None, candidate_labels="politics")
        with self.assertRaises(ValueError):
            classifier("Who are you voting for in 2020?", candidate_labels="")
        with self.assertRaises(TypeError):
            classifier("Who are you voting for in 2020?", candidate_labels=None)
        with self.assertRaises(ValueError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template="Not formatting template",
            )
        with self.assertRaises(AttributeError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template=None,
            )

        self.run_entailment_id(classifier)
    def run_entailment_id(self, zero_shot_classifier: Pipeline):
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id

        config.label2id = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1)

        config.label2id = {"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2)

        config.label2id = original_label2id
        self.assertEqual(original_entailment, zero_shot_classifier.entailment_id)
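    # Illustration of the lookup under test (a descriptive note, not part of the
    # original file): the pipeline scans config.label2id for the first label whose
    # lowercased name starts with "entail" and falls back to -1, which is exactly
    # what the assertions above exercise.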
    @require_torch
    def test_truncation(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="pt",
        )
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            "Who are you voting for in 2020?" * 100, candidate_labels=["politics", "public health", "science"]
        )

    @require_torch
    def test_small_model_pt(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="pt",
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            },
        )

    @require_tf
    def test_small_model_tf(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="tf",
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            },
        )
@slow
@require_torch
    def test_large_model_pt(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="pt")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.976, 0.015, 0.009],
            },
        )
        outputs = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=lowerCAmelCase__ , )
        self.assertEqual(
            nested_simplify(outputs),
            {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
@slow
@require_tf
    def test_large_model_tf(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="tf")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.976, 0.015, 0.009],
            },
        )
        outputs = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=lowerCAmelCase__ , )
        self.assertEqual(
            nested_simplify(outputs),
            {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
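    # Minimal usage sketch for the pipeline under test (illustrative only; it
    # mirrors the large-model tests above rather than adding new behaviour):
    #
    #   classifier = pipeline("zero-shot-classification", model="roberta-large-mnli")
    #   classifier(
    #       "Who are you voting for in 2020?",
    #       candidate_labels=["politics", "public health", "science"],
    #   )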
with self.assertRaises(lowerCAmelCase__ ):
classifier(lowerCAmelCase__ , candidate_labels="politics" )
with self.assertRaises(lowerCAmelCase__ ):
classifier("Who are you voting for in 2020?" , candidate_labels="" )
with self.assertRaises(lowerCAmelCase__ ):
classifier("Who are you voting for in 2020?" , candidate_labels=lowerCAmelCase__ )
with self.assertRaises(lowerCAmelCase__ ):
classifier(
"Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template="Not formatting template" , )
with self.assertRaises(lowerCAmelCase__ ):
classifier(
"Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template=lowerCAmelCase__ , )
self.run_entailment_id(lowerCAmelCase__ )
def _A ( self :List[Any] , lowerCAmelCase__ :Pipeline ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : int = zero_shot_classifier.model.config
snake_case_ : Optional[int] = config.labelaid
snake_case_ : Tuple = zero_shot_classifier.entailment_id
snake_case_ : Optional[Any] = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
self.assertEqual(zero_shot_classifier.entailment_id , -1 )
snake_case_ : Tuple = {"entailment": 0, "neutral": 1, "contradiction": 2}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
snake_case_ : str = {"ENTAIL": 0, "NON-ENTAIL": 1}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
snake_case_ : str = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
self.assertEqual(zero_shot_classifier.entailment_id , 2 )
snake_case_ : List[str] = original_labelaid
self.assertEqual(lowerCAmelCase__ , zero_shot_classifier.entailment_id )
@require_torch
def _A ( self :Tuple ) -> Any:
'''simple docstring'''
snake_case_ : List[Any] = pipeline(
"zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
"Who are you voting for in 2020?" * 100 , candidate_labels=["politics", "public health", "science"] )
@require_torch
def _A ( self :Optional[Any] ) -> Tuple:
'''simple docstring'''
snake_case_ : Union[str, Any] = pipeline(
"zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , )
snake_case_ : int = zero_shot_classifier(
"Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.3_3_3, 0.3_3_3, 0.3_3_3],
} , )
@require_tf
def _A ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
snake_case_ : List[str] = pipeline(
"zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="tf" , )
snake_case_ : Optional[int] = zero_shot_classifier(
"Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.3_3_3, 0.3_3_3, 0.3_3_3],
} , )
@slow
@require_torch
def _A ( self :Union[str, Any] ) -> int:
'''simple docstring'''
snake_case_ : int = pipeline("zero-shot-classification" , model="roberta-large-mnli" , framework="pt" )
snake_case_ : str = zero_shot_classifier(
"Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.9_7_6, 0.0_1_5, 0.0_0_9],
} , )
snake_case_ : Optional[int] = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=lowerCAmelCase__ , )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
@slow
@require_tf
def _A ( self :List[str] ) -> str:
'''simple docstring'''
snake_case_ : int = pipeline("zero-shot-classification" , model="roberta-large-mnli" , framework="tf" )
snake_case_ : Optional[Any] = zero_shot_classifier(
"Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.9_7_6, 0.0_1_5, 0.0_0_9],
} , )
snake_case_ : Tuple = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=lowerCAmelCase__ , )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
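# Added usage sketch (not from the source): a minimal zero-shot call against the same
# "roberta-large-mnli" checkpoint the slow tests above load; the input text and candidate
# labels are illustrative.
from transformers import pipeline

zero_shot = pipeline("zero-shot-classification", model="roberta-large-mnli")
prediction = zero_shot(
    "Who are you voting for in 2020?",
    candidate_labels=["politics", "public health", "science"],
)
print(prediction["labels"][0], prediction["scores"][0])  # highest-scoring label comes first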
| 656 |
'''simple docstring'''
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''1.0.0a'''):
raise Exception('''requires fairseq >= 1.0.0a''')
logging.set_verbosity_info()
__lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
__lowerCamelCase : Union[str, Any] = '''Hello world! cécé herlolip'''
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> Optional[Any]:
"""simple docstring"""
snake_case_ : str = FairseqRobertaModel.from_pretrained(__magic_name__ )
roberta.eval() # disable dropout
snake_case_ : Dict = roberta.model.encoder.sentence_encoder
snake_case_ : List[str] = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings ,hidden_size=roberta.cfg.model.encoder_embed_dim ,num_hidden_layers=roberta.cfg.model.encoder_layers ,num_attention_heads=roberta.cfg.model.encoder_attention_heads ,intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim ,max_position_embeddings=514 ,type_vocab_size=1 ,layer_norm_eps=1E-5 ,)
if classification_head:
snake_case_ : List[str] = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]
print("Our RoBERTa config:" ,__magic_name__ )
snake_case_ : List[str] = XLMRobertaXLForSequenceClassification(__magic_name__ ) if classification_head else XLMRobertaXLForMaskedLM(__magic_name__ )
model.eval()
# Now let's copy all the weights.
# Embeddings
snake_case_ : List[Any] = roberta_sent_encoder.embed_tokens.weight
snake_case_ : int = roberta_sent_encoder.embed_positions.weight
snake_case_ : Union[str, Any] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
snake_case_ : Union[str, Any] = roberta_sent_encoder.layer_norm.weight
snake_case_ : str = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
snake_case_ : BertLayer = model.roberta.encoder.layer[i]
snake_case_ : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
snake_case_ : RobertaAttention = layer.attention
snake_case_ : Dict = roberta_layer.self_attn_layer_norm.weight
snake_case_ : Dict = roberta_layer.self_attn_layer_norm.bias
# self attention
snake_case_ : BertSelfAttention = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
snake_case_ : Dict = roberta_layer.self_attn.q_proj.weight
snake_case_ : Any = roberta_layer.self_attn.q_proj.bias
snake_case_ : Optional[Any] = roberta_layer.self_attn.k_proj.weight
snake_case_ : Optional[Any] = roberta_layer.self_attn.k_proj.bias
snake_case_ : Optional[int] = roberta_layer.self_attn.v_proj.weight
snake_case_ : Any = roberta_layer.self_attn.v_proj.bias
# self-attention output
snake_case_ : BertSelfOutput = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
snake_case_ : List[str] = roberta_layer.self_attn.out_proj.weight
snake_case_ : Optional[int] = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
snake_case_ : int = roberta_layer.final_layer_norm.weight
snake_case_ : Union[str, Any] = roberta_layer.final_layer_norm.bias
# intermediate
snake_case_ : BertIntermediate = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
snake_case_ : List[str] = roberta_layer.fc1.weight
snake_case_ : List[Any] = roberta_layer.fc1.bias
# output
snake_case_ : BertOutput = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
snake_case_ : Any = roberta_layer.fc2.weight
snake_case_ : Any = roberta_layer.fc2.bias
# end of layer
if classification_head:
snake_case_ : int = roberta.model.classification_heads["mnli"].dense.weight
snake_case_ : Union[str, Any] = roberta.model.classification_heads["mnli"].dense.bias
snake_case_ : Tuple = roberta.model.classification_heads["mnli"].out_proj.weight
snake_case_ : str = roberta.model.classification_heads["mnli"].out_proj.bias
else:
# LM Head
snake_case_ : Optional[Any] = roberta.model.encoder.lm_head.dense.weight
snake_case_ : int = roberta.model.encoder.lm_head.dense.bias
snake_case_ : Optional[Any] = roberta.model.encoder.lm_head.layer_norm.weight
snake_case_ : Optional[int] = roberta.model.encoder.lm_head.layer_norm.bias
snake_case_ : int = roberta.model.encoder.lm_head.weight
snake_case_ : List[str] = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
snake_case_ : torch.Tensor = roberta.encode(__magic_name__ ).unsqueeze(0 ) # batch of size 1
snake_case_ : Union[str, Any] = model(__magic_name__ )[0]
if classification_head:
snake_case_ : Optional[Any] = roberta.model.classification_heads["mnli"](roberta.extract_features(__magic_name__ ) )
else:
snake_case_ : List[str] = roberta.model(__magic_name__ )[0]
print(our_output.shape ,their_output.shape )
snake_case_ : str = torch.max(torch.abs(our_output - their_output ) ).item()
print(F'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
snake_case_ : Any = torch.allclose(__magic_name__ ,__magic_name__ ,atol=1E-3 )
print("Do both models output the same tensors?" ,"🔥" if success else "💩" )
if not success:
raise Exception("Something went wRoNg" )
pathlib.Path(__magic_name__ ).mkdir(parents=__magic_name__ ,exist_ok=__magic_name__ )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(__magic_name__ )
if __name__ == "__main__":
__lowerCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--roberta_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
__lowerCamelCase : Tuple = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
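# Added usage note (not from the source): a hypothetical command line for the converter above.
# The script filename and both paths are placeholders; the flag names come from the argparse
# definitions directly above.
#
#   python convert_xlm_roberta_xl_checkpoint.py \
#       --roberta_checkpoint_path /path/to/fairseq/xlmr.xl \
#       --pytorch_dump_folder_path /path/to/hf_output \
#       --classification_head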
| 656 | 1 |
'''simple docstring'''
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class A_ (nn.Module ):
"""simple docstring"""
a__ = 42
a__ = 42
a__ = 0.0
a__ = 1
a__ = 1
a__ = True
a__ = False
a__ = False
a__ = False
a__ = jnp.floataa
def _A ( self :Any ) -> int:
'''simple docstring'''
snake_case_ : Dict = []
snake_case_ : Union[str, Any] = []
for i in range(self.num_layers ):
snake_case_ : Tuple = self.in_channels if i == 0 else self.out_channels
snake_case_ : Any = FlaxResnetBlockaD(
in_channels=lowerCAmelCase__ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowerCAmelCase__ )
snake_case_ : List[str] = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(lowerCAmelCase__ )
snake_case_ : Dict = resnets
snake_case_ : Union[str, Any] = attentions
if self.add_downsample:
snake_case_ : Optional[Any] = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self :Optional[int] , lowerCAmelCase__ :str , lowerCAmelCase__ :Any , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :List[Any]=True ) -> List[Any]:
'''simple docstring'''
snake_case_ : int = ()
for resnet, attn in zip(self.resnets , self.attentions ):
snake_case_ : List[Any] = resnet(lowerCAmelCase__ , lowerCAmelCase__ , deterministic=lowerCAmelCase__ )
snake_case_ : Dict = attn(lowerCAmelCase__ , lowerCAmelCase__ , deterministic=lowerCAmelCase__ )
output_states += (hidden_states,)
if self.add_downsample:
snake_case_ : Optional[Any] = self.downsamplers_a(lowerCAmelCase__ )
output_states += (hidden_states,)
return hidden_states, output_states
class A_ (nn.Module ):
"""simple docstring"""
a__ = 42
a__ = 42
a__ = 0.0
a__ = 1
a__ = True
a__ = jnp.floataa
def _A ( self :List[str] ) -> List[Any]:
'''simple docstring'''
snake_case_ : int = []
for i in range(self.num_layers ):
snake_case_ : Any = self.in_channels if i == 0 else self.out_channels
snake_case_ : int = FlaxResnetBlockaD(
in_channels=lowerCAmelCase__ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowerCAmelCase__ )
snake_case_ : Union[str, Any] = resnets
if self.add_downsample:
snake_case_ : Tuple = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self :Dict , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Any , lowerCAmelCase__ :Union[str, Any]=True ) -> List[Any]:
'''simple docstring'''
snake_case_ : Dict = ()
for resnet in self.resnets:
snake_case_ : Any = resnet(lowerCAmelCase__ , lowerCAmelCase__ , deterministic=lowerCAmelCase__ )
output_states += (hidden_states,)
if self.add_downsample:
snake_case_ : Union[str, Any] = self.downsamplers_a(lowerCAmelCase__ )
output_states += (hidden_states,)
return hidden_states, output_states
class A_ (nn.Module ):
"""simple docstring"""
a__ = 42
a__ = 42
a__ = 42
a__ = 0.0
a__ = 1
a__ = 1
a__ = True
a__ = False
a__ = False
a__ = False
a__ = jnp.floataa
def _A ( self :Tuple ) -> str:
'''simple docstring'''
snake_case_ : Dict = []
snake_case_ : List[Any] = []
for i in range(self.num_layers ):
snake_case_ : List[str] = self.in_channels if (i == self.num_layers - 1) else self.out_channels
snake_case_ : Optional[int] = self.prev_output_channel if i == 0 else self.out_channels
snake_case_ : int = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowerCAmelCase__ )
snake_case_ : Optional[int] = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(lowerCAmelCase__ )
snake_case_ : Optional[Any] = resnets
snake_case_ : List[str] = attentions
if self.add_upsample:
snake_case_ : Optional[Any] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self :List[str] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :str , lowerCAmelCase__ :int , lowerCAmelCase__ :int , lowerCAmelCase__ :Tuple=True ) -> int:
'''simple docstring'''
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
snake_case_ : Any = res_hidden_states_tuple[-1]
snake_case_ : str = res_hidden_states_tuple[:-1]
snake_case_ : List[Any] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
snake_case_ : Union[str, Any] = resnet(lowerCAmelCase__ , lowerCAmelCase__ , deterministic=lowerCAmelCase__ )
snake_case_ : Optional[int] = attn(lowerCAmelCase__ , lowerCAmelCase__ , deterministic=lowerCAmelCase__ )
if self.add_upsample:
snake_case_ : List[Any] = self.upsamplers_a(lowerCAmelCase__ )
return hidden_states
class A_ (nn.Module ):
"""simple docstring"""
a__ = 42
a__ = 42
a__ = 42
a__ = 0.0
a__ = 1
a__ = True
a__ = jnp.floataa
def _A ( self :List[Any] ) -> Optional[int]:
'''simple docstring'''
snake_case_ : int = []
for i in range(self.num_layers ):
snake_case_ : List[Any] = self.in_channels if (i == self.num_layers - 1) else self.out_channels
snake_case_ : List[str] = self.prev_output_channel if i == 0 else self.out_channels
snake_case_ : Any = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowerCAmelCase__ )
snake_case_ : List[str] = resnets
if self.add_upsample:
snake_case_ : Optional[Any] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self :List[Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :int=True ) -> Tuple:
'''simple docstring'''
for resnet in self.resnets:
# pop res hidden states
snake_case_ : Optional[Any] = res_hidden_states_tuple[-1]
snake_case_ : int = res_hidden_states_tuple[:-1]
snake_case_ : Optional[int] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
snake_case_ : int = resnet(lowerCAmelCase__ , lowerCAmelCase__ , deterministic=lowerCAmelCase__ )
if self.add_upsample:
snake_case_ : List[Any] = self.upsamplers_a(lowerCAmelCase__ )
return hidden_states
class A_ (nn.Module ):
"""simple docstring"""
a__ = 42
a__ = 0.0
a__ = 1
a__ = 1
a__ = False
a__ = False
a__ = jnp.floataa
def _A ( self :List[Any] ) -> Any:
'''simple docstring'''
snake_case_ : Union[str, Any] = [
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
snake_case_ : str = []
for _ in range(self.num_layers ):
snake_case_ : Union[str, Any] = FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(lowerCAmelCase__ )
snake_case_ : Tuple = FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowerCAmelCase__ )
snake_case_ : Union[str, Any] = resnets
snake_case_ : List[str] = attentions
def __call__( self :Union[str, Any] , lowerCAmelCase__ :Any , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Any , lowerCAmelCase__ :int=True ) -> int:
'''simple docstring'''
snake_case_ : List[str] = self.resnets[0](lowerCAmelCase__ , lowerCAmelCase__ )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
snake_case_ : Optional[Any] = attn(lowerCAmelCase__ , lowerCAmelCase__ , deterministic=lowerCAmelCase__ )
snake_case_ : Optional[Any] = resnet(lowerCAmelCase__ , lowerCAmelCase__ , deterministic=lowerCAmelCase__ )
return hidden_states
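# Added usage sketch (not from the source). Assumption: the plain down block above corresponds to
# diffusers' FlaxDownBlock2D, so the sketch targets the upstream class (import path as in older
# diffusers releases); inputs are channels-last NHWC, which the axis=-1 skip concatenation implies,
# and the time-embedding width is illustrative.
import jax
import jax.numpy as jnp
from diffusers.models.unet_2d_blocks_flax import FlaxDownBlock2D

down_block = FlaxDownBlock2D(in_channels=32, out_channels=64, num_layers=1, add_downsample=True)
sample = jnp.zeros((1, 16, 16, 32), dtype=jnp.float32)  # (batch, height, width, channels)
temb = jnp.zeros((1, 128), dtype=jnp.float32)           # time embedding
params = down_block.init(jax.random.PRNGKey(0), sample, temb, True)  # True -> deterministic
hidden, skip_states = down_block.apply(params, sample, temb, True)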
| 656 |
'''simple docstring'''
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def __UpperCAmelCase ( __magic_name__ ,__magic_name__=() ,__magic_name__=None ,__magic_name__="no" ,__magic_name__="29500" )-> Optional[int]:
"""simple docstring"""
snake_case_ : str = False
snake_case_ : int = False
if any(key.startswith("KAGGLE" ) for key in os.environ.keys() ):
snake_case_ : Any = True
elif "IPython" in sys.modules:
snake_case_ : Union[str, Any] = "google.colab" in str(sys.modules["IPython"].get_ipython() )
try:
snake_case_ : Any = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
F'''Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.''' )
if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME" ,__magic_name__ ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
"your training function. Restart your notebook and make sure no cells initializes an "
"`Accelerator`." )
if num_processes is None:
snake_case_ : Tuple = 8
snake_case_ : Optional[int] = PrepareForLaunch(__magic_name__ ,distributed_type="TPU" )
print(F'''Launching a training on {num_processes} TPU cores.''' )
xmp.spawn(__magic_name__ ,args=__magic_name__ ,nprocs=__magic_name__ ,start_method="fork" )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print("Launching training on one GPU." )
else:
print("Launching training on one CPU." )
function(*__magic_name__ )
else:
if num_processes is None:
raise ValueError(
"You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call." )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
"inside your training function. Restart your notebook and make sure no cells initializes an "
"`Accelerator`." )
if torch.cuda.is_initialized():
raise ValueError(
"To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
"using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
"function." )
# torch.distributed will expect a few environment variables to be here. We set the ones common to each
# process here (the other ones will be set by the launcher).
with patch_environment(
world_size=__magic_name__ ,master_addr="127.0.0.1" ,master_port=__magic_name__ ,mixed_precision=__magic_name__ ):
snake_case_ : Optional[int] = PrepareForLaunch(__magic_name__ ,distributed_type="MULTI_GPU" )
print(F'''Launching training on {num_processes} GPUs.''' )
try:
start_processes(__magic_name__ ,args=__magic_name__ ,nprocs=__magic_name__ ,start_method="fork" )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
"CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
"This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
"Please review your imports and test them when running the `notebook_launcher()` to identify "
"which one is problematic." ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
snake_case_ : Any = "1"
print("Launching training on MPS." )
elif torch.cuda.is_available():
print("Launching training on one GPU." )
else:
print("Launching training on CPU." )
function(*__magic_name__ )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__=() ,__magic_name__=2 )-> Dict:
"""simple docstring"""
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
# torch.distributed will expect a few environment variables to be here. We set the ones common to each
# process here (the other ones will be set by the launcher).
with patch_environment(
world_size=__magic_name__ ,master_addr="127.0.0.1" ,master_port="29500" ,accelerate_mixed_precision="no" ,accelerate_debug_rdv_file=tmp_file.name ,accelerate_use_cpu="yes" ,):
snake_case_ : Any = PrepareForLaunch(__magic_name__ ,debug=__magic_name__ )
start_processes(__magic_name__ ,args=__magic_name__ ,nprocs=__magic_name__ ,start_method="fork" )
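# Added usage sketch (not from the source): upstream accelerate exposes the first function above
# as `notebook_launcher`; the training function and the process count here are illustrative.
from accelerate import notebook_launcher

def training_loop(mixed_precision="no"):
    # Build the Accelerator, model and dataloaders *inside* this function, as the
    # launcher's checks above require.
    ...

notebook_launcher(training_loop, args=("fp16",), num_processes=2)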
| 656 | 1 |
'''simple docstring'''
__lowerCamelCase : Any = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
__lowerCamelCase : str = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
__lowerCamelCase : List[str] = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
__lowerCamelCase : Union[str, Any] = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
__lowerCamelCase : str = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
__lowerCamelCase : Optional[int] = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
__lowerCamelCase : Optional[Any] = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
__lowerCamelCase : Union[str, Any] = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
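# Added note (not from the source): these constants look like precomputed, descending diffusion
# timestep schedules of varying density; the original variable names were not preserved, so that
# reading is an assumption. A minimal sketch of how a sampler loop might consume one:
def run_schedule(sample, schedule, step_fn):
    """Apply one denoising step per entry, walking the schedule from high t to low t."""
    for t in schedule:
        sample = step_fn(sample, t)
    return sample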
| 656 |
'''simple docstring'''
from collections import deque
from math import floor
from random import random
from time import time
class A_ :
"""simple docstring"""
def __init__( self :Dict ) -> List[str]:
'''simple docstring'''
snake_case_ : int = {}
def _A ( self :Any , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Optional[Any]=1 ) -> Any:
'''simple docstring'''
if self.graph.get(lowerCAmelCase__ ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
snake_case_ : Optional[int] = [[w, v]]
if not self.graph.get(lowerCAmelCase__ ):
snake_case_ : Dict = []
def _A ( self :List[Any] ) -> Optional[int]:
'''simple docstring'''
return list(self.graph )
def _A ( self :str , lowerCAmelCase__ :Any , lowerCAmelCase__ :int ) -> List[Any]:
'''simple docstring'''
if self.graph.get(lowerCAmelCase__ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowerCAmelCase__ )
def _A ( self :List[str] , lowerCAmelCase__ :Optional[Any]=-2 , lowerCAmelCase__ :str=-1 ) -> str:
'''simple docstring'''
if s == d:
return []
snake_case_ : str = []
snake_case_ : Optional[int] = []
if s == -2:
snake_case_ : List[Any] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Dict = s
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : str = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowerCAmelCase__ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Union[str, Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Optional[Any] = ss
# check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return visited
def _A ( self :Tuple , lowerCAmelCase__ :int=-1 ) -> int:
'''simple docstring'''
if c == -1:
snake_case_ : Any = floor(random() * 10_000 ) + 10
for i in range(lowerCAmelCase__ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
snake_case_ : Optional[Any] = floor(random() * c ) + 1
if n != i:
self.add_pair(lowerCAmelCase__ , lowerCAmelCase__ , 1 )
def _A ( self :Tuple , lowerCAmelCase__ :Dict=-2 ) -> Dict:
'''simple docstring'''
snake_case_ : Union[str, Any] = deque()
snake_case_ : Optional[Any] = []
if s == -2:
snake_case_ : Tuple = list(self.graph )[0]
d.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
while d:
snake_case_ : Optional[int] = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _A ( self :List[str] , lowerCAmelCase__ :str ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Tuple = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def _A ( self :Any , lowerCAmelCase__ :int ) -> Optional[Any]:
'''simple docstring'''
return len(self.graph[u] )
def _A ( self :Tuple , lowerCAmelCase__ :List[str]=-2 ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : str = []
snake_case_ : str = []
if s == -2:
snake_case_ : Optional[Any] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : int = s
snake_case_ : Optional[int] = []
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : List[Any] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : List[str] = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(lowerCAmelCase__ ) != 0:
snake_case_ : int = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Union[str, Any] = ss
# check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return sorted_nodes
def _A ( self :Dict ) -> Any:
'''simple docstring'''
snake_case_ : Dict = []
snake_case_ : Any = []
snake_case_ : str = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Optional[int] = -2
snake_case_ : Any = []
snake_case_ : List[Any] = s
snake_case_ : int = False
snake_case_ : Optional[int] = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : List[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case_ : Any = len(lowerCAmelCase__ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : Optional[int] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ : Optional[Any] = True
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Optional[Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : str = False
indirect_parents.append(lowerCAmelCase__ )
snake_case_ : List[str] = s
snake_case_ : Optional[int] = ss
# check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return list(lowerCAmelCase__ )
def _A ( self :Tuple ) -> List[str]:
'''simple docstring'''
snake_case_ : List[Any] = []
snake_case_ : Tuple = []
snake_case_ : List[str] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : str = -2
snake_case_ : List[str] = []
snake_case_ : List[Any] = s
snake_case_ : List[str] = False
snake_case_ : Dict = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : List[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case_ : Any = len(lowerCAmelCase__ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ : Tuple = True
if len(lowerCAmelCase__ ) != 0:
snake_case_ : List[Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Optional[int] = False
indirect_parents.append(lowerCAmelCase__ )
snake_case_ : int = s
snake_case_ : Union[str, Any] = ss
# check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return False
def _A ( self :Optional[int] , lowerCAmelCase__ :Optional[int]=-2 , lowerCAmelCase__ :Tuple=-1 ) -> str:
'''simple docstring'''
snake_case_ : Optional[int] = time()
self.dfs(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : Optional[Any] = time()
return end - begin
def _A ( self :Any , lowerCAmelCase__ :Tuple=-2 ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Any = time()
self.bfs(lowerCAmelCase__ )
snake_case_ : Any = time()
return end - begin
class A_ :
"""simple docstring"""
def __init__( self :Tuple ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[Any] = {}
def _A ( self :str , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Union[str, Any]=1 ) -> str:
'''simple docstring'''
if self.graph.get(lowerCAmelCase__ ):
# if there already is an edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
snake_case_ : str = [[w, v]]
# add the other way
if self.graph.get(lowerCAmelCase__ ):
# if there already is an edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
# if v does not exist
snake_case_ : List[str] = [[w, u]]
def _A ( self :Dict , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
if self.graph.get(lowerCAmelCase__ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowerCAmelCase__ )
# the other way round
if self.graph.get(lowerCAmelCase__ ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(lowerCAmelCase__ )
def _A ( self :Optional[Any] , lowerCAmelCase__ :Optional[Any]=-2 , lowerCAmelCase__ :Optional[int]=-1 ) -> int:
'''simple docstring'''
if s == d:
return []
snake_case_ : Any = []
snake_case_ : Dict = []
if s == -2:
snake_case_ : Optional[int] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Tuple = s
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : List[str] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowerCAmelCase__ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Optional[Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : str = ss
# check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return visited
def _A ( self :Optional[int] , lowerCAmelCase__ :str=-1 ) -> List[Any]:
'''simple docstring'''
if c == -1:
snake_case_ : Optional[int] = floor(random() * 10_000 ) + 10
for i in range(lowerCAmelCase__ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
snake_case_ : str = floor(random() * c ) + 1
if n != i:
self.add_pair(lowerCAmelCase__ , lowerCAmelCase__ , 1 )
def _A ( self :Any , lowerCAmelCase__ :Optional[Any]=-2 ) -> List[Any]:
'''simple docstring'''
snake_case_ : List[str] = deque()
snake_case_ : Optional[Any] = []
if s == -2:
snake_case_ : List[Any] = list(self.graph )[0]
d.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
while d:
snake_case_ : Optional[int] = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _A ( self :str , lowerCAmelCase__ :Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
return len(self.graph[u] )
def _A ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
snake_case_ : Any = []
snake_case_ : Optional[Any] = []
snake_case_ : Optional[Any] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Tuple = -2
snake_case_ : Optional[int] = []
snake_case_ : Tuple = s
snake_case_ : Optional[Any] = False
snake_case_ : Optional[int] = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : Optional[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case_ : Optional[int] = len(lowerCAmelCase__ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : Tuple = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ : Optional[int] = True
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Optional[Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Optional[int] = False
indirect_parents.append(lowerCAmelCase__ )
snake_case_ : List[Any] = s
snake_case_ : Dict = ss
# check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return list(lowerCAmelCase__ )
def _A ( self :Optional[Any] ) -> Tuple:
'''simple docstring'''
snake_case_ : Optional[Any] = []
snake_case_ : int = []
snake_case_ : List[str] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Tuple = -2
snake_case_ : int = []
snake_case_ : int = s
snake_case_ : Optional[Any] = False
snake_case_ : List[Any] = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : Union[str, Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case_ : Tuple = len(lowerCAmelCase__ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : Optional[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ : Optional[Any] = True
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Tuple = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Optional[int] = False
indirect_parents.append(lowerCAmelCase__ )
snake_case_ : Union[str, Any] = s
snake_case_ : Tuple = ss
# check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return False
def _A ( self :Any ) -> Tuple:
'''simple docstring'''
return list(self.graph )
def _A ( self :Optional[Any] , lowerCAmelCase__ :Tuple=-2 , lowerCAmelCase__ :Optional[int]=-1 ) -> str:
'''simple docstring'''
snake_case_ : List[str] = time()
self.dfs(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : List[Any] = time()
return end - begin
def _A ( self :Union[str, Any] , lowerCAmelCase__ :List[Any]=-2 ) -> int:
'''simple docstring'''
snake_case_ : List[str] = time()
self.bfs(lowerCAmelCase__ )
snake_case_ : Tuple = time()
return end - begin
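# Added usage note (not from the source). Upstream (TheAlgorithms) this file defines a directed
# and an undirected weighted graph whose methods the internal calls reference (add_pair, dfs,
# bfs, ...); here every method was mangled to `_A` and both classes to `A_`, so the sketch below
# is written against the presumed upstream naming:
#
#   g = DirectedGraph()
#   g.add_pair(0, 1); g.add_pair(1, 2); g.add_pair(0, 2)
#   g.dfs(0, 2)   # depth-first path from 0 to 2
#   g.bfs(0)      # breadth-first visit order from 0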
| 656 | 1 |
'''simple docstring'''
from __future__ import annotations
def __UpperCAmelCase ( nums )-> bool:
"""simple docstring"""
if len(nums ) < 2:
raise ValueError("Monogons and Digons are not polygons in the Euclidean space" )
if any(i <= 0 for i in nums ):
raise ValueError("All values must be greater than 0" )
copy_nums : List[str] = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
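# Added examples (not from the source): sides [3, 4, 5] satisfy the polygon inequality
# (5 < 3 + 4), while [1, 1, 3] does not (3 >= 1 + 1).
assert __UpperCAmelCase([3, 4, 5]) is True
assert __UpperCAmelCase([1, 1, 3]) is False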
| 656 |
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
__lowerCamelCase : List[str] = re.compile(R'''\s+''')
def __UpperCAmelCase ( __magic_name__ )-> Union[str, Any]:
"""simple docstring"""
return {"hash": hashlib.mda(re.sub(__magic_name__ ,"" ,example["content"] ).encode("utf-8" ) ).hexdigest()}
def __UpperCAmelCase ( __magic_name__ )-> str:
"""simple docstring"""
snake_case_ : Optional[Any] = [len(__magic_name__ ) for line in example["content"].splitlines()]
return {"line_mean": np.mean(__magic_name__ ), "line_max": max(__magic_name__ )}
def __UpperCAmelCase ( __magic_name__ )-> int:
"""simple docstring"""
snake_case_ : Optional[int] = np.mean([c.isalnum() for c in example["content"]] )
return {"alpha_frac": alpha_frac}
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> Tuple:
"""simple docstring"""
if example["hash"] in uniques:
uniques.remove(example["hash"] )
return True
else:
return False
def __UpperCAmelCase ( __magic_name__ ,__magic_name__=5 )-> Tuple:
"""simple docstring"""
snake_case_ : List[str] = ["auto-generated", "autogenerated", "automatically generated"]
snake_case_ : Optional[Any] = example["content"].splitlines()
for _, line in zip(range(__magic_name__ ) ,__magic_name__ ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def __UpperCAmelCase ( __magic_name__ ,__magic_name__=5 ,__magic_name__=0.05 )-> Optional[Any]:
"""simple docstring"""
snake_case_ : str = ["unit tests", "test file", "configuration file"]
snake_case_ : int = example["content"].splitlines()
snake_case_ : Optional[Any] = 0
snake_case_ : Any = 0
# first test
for _, line in zip(range(__magic_name__ ) ,__magic_name__ ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
snake_case_ : Tuple = example["content"].count("\n" )
snake_case_ : int = int(coeff * nlines )
for line in lines:
count_config += line.lower().count("config" )
count_test += line.lower().count("test" )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def __UpperCAmelCase ( __magic_name__ )-> str:
"""simple docstring"""
snake_case_ : List[Any] = ["def ", "class ", "for ", "while "]
snake_case_ : Optional[Any] = example["content"].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def __UpperCAmelCase ( __magic_name__ ,__magic_name__=4 )-> Optional[int]:
"""simple docstring"""
snake_case_ : Tuple = example["content"].splitlines()
snake_case_ : Tuple = 0
for line in lines:
counter += line.lower().count("=" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def __UpperCAmelCase ( __magic_name__ )-> List[Any]:
"""simple docstring"""
snake_case_ : Tuple = tokenizer(example["content"] ,truncation=__magic_name__ )["input_ids"]
snake_case_ : int = len(example["content"] ) / len(__magic_name__ )
return {"ratio": ratio}
def __UpperCAmelCase ( __magic_name__ )-> Optional[Any]:
"""simple docstring"""
snake_case_ : Union[str, Any] = {}
results.update(get_hash(__magic_name__ ) )
results.update(line_stats(__magic_name__ ) )
results.update(alpha_stats(__magic_name__ ) )
results.update(char_token_ratio(__magic_name__ ) )
results.update(is_autogenerated(__magic_name__ ) )
results.update(is_config_or_test(__magic_name__ ) )
results.update(has_no_keywords(__magic_name__ ) )
results.update(has_few_assignments(__magic_name__ ) )
return results
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> Tuple:
"""simple docstring"""
if not check_uniques(__magic_name__ ,__magic_name__ ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def __UpperCAmelCase ( __magic_name__ )-> Dict:
"""simple docstring"""
with open(__magic_name__ ,"rb" ) as f_in:
with gzip.open(str(__magic_name__ ) + ".gz" ,"wb" ,compresslevel=6 ) as f_out:
shutil.copyfileobj(__magic_name__ ,__magic_name__ )
os.unlink(__magic_name__ )
# Settings
__lowerCamelCase : List[Any] = HfArgumentParser(PreprocessingArguments)
__lowerCamelCase : str = parser.parse_args()
if args.num_workers is None:
__lowerCamelCase : List[Any] = multiprocessing.cpu_count()
__lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
__lowerCamelCase : Any = time.time()
__lowerCamelCase : str = load_dataset(args.dataset_name, split='''train''')
print(f'''Time to load dataset: {time.time()-t_start:.2f}''')
# Run preprocessing
__lowerCamelCase : List[str] = time.time()
__lowerCamelCase : Any = ds.map(preprocess, num_proc=args.num_workers)
print(f'''Time to preprocess dataset: {time.time()-t_start:.2f}''')
# Deduplicate hashes
__lowerCamelCase : Any = set(ds.unique('''hash'''))
__lowerCamelCase : Optional[int] = len(uniques) / len(ds)
print(f'''Fraction of duplicates: {1-frac:.2%}''')
# Deduplicate data and apply heuristics
__lowerCamelCase : List[str] = time.time()
__lowerCamelCase : Tuple = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args})
print(f'''Time to filter dataset: {time.time()-t_start:.2f}''')
print(f'''Size of filtered dataset: {len(ds_filter)}''')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
__lowerCamelCase : List[str] = time.time()
__lowerCamelCase , __lowerCamelCase : Tuple = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(f'''Time to deduplicate dataset: {time.time()-t_start:.2f}''')
print(f'''Size of deduplicate dataset: {len(ds_filter)}''')
# Save data in batches of samples_per_file
__lowerCamelCase : List[Any] = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place to save it
if args.near_deduplication:
with open(output_dir / '''duplicate_clusters.json''', '''w''') as f:
json.dump(duplicate_clusters, f)
__lowerCamelCase : List[str] = output_dir / '''data'''
data_dir.mkdir(exist_ok=True)
__lowerCamelCase : int = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
__lowerCamelCase : Union[str, Any] = str(data_dir / f'''file-{file_number+1:012}.json''')
__lowerCamelCase : List[Any] = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(f'''Time to save dataset: {time.time()-t_start:.2f}''')
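
# A minimal, self-contained sketch of two pieces used above -- the
# character-to-token ratio heuristic and the gzip shard compression.
# `char_token_ratio_sketch` and `compress_file_sketch` are hypothetical
# names; the tokenizer stands in for whatever `args.tokenizer_dir` loads.
import gzip
import os
import shutil


def char_token_ratio_sketch(example, tokenizer):
    # Low character-per-token ratios tend to flag unusual, non-code content.
    tokens = tokenizer(example["content"], truncation=False)["input_ids"]
    return {"ratio": len(example["content"]) / len(tokens)}


def compress_file_sketch(file_path):
    # Gzip one JSON shard, then drop the uncompressed original.
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)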
| 656 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
__lowerCamelCase : Any = logging.get_logger(__name__)
__lowerCamelCase : str = {
'''EleutherAI/gpt-neo-1.3B''': '''https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json''',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class A_ (a_ ):
"""simple docstring"""
a__ = '''gpt_neo'''
a__ = ['''past_key_values''']
a__ = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self :List[str] , lowerCAmelCase__ :Tuple=50_257 , lowerCAmelCase__ :Optional[Any]=2_048 , lowerCAmelCase__ :Tuple=2_048 , lowerCAmelCase__ :Optional[int]=24 , lowerCAmelCase__ :List[str]=[[["global", "local"], 12]] , lowerCAmelCase__ :Optional[int]=16 , lowerCAmelCase__ :List[Any]=None , lowerCAmelCase__ :Dict=256 , lowerCAmelCase__ :Union[str, Any]="gelu_new" , lowerCAmelCase__ :Dict=0.0 , lowerCAmelCase__ :Optional[Any]=0.0 , lowerCAmelCase__ :Union[str, Any]=0.0 , lowerCAmelCase__ :str=0.1 , lowerCAmelCase__ :Any=1E-5 , lowerCAmelCase__ :Optional[Any]=0.0_2 , lowerCAmelCase__ :Dict=True , lowerCAmelCase__ :int=50_256 , lowerCAmelCase__ :Tuple=50_256 , **lowerCAmelCase__ :Any , ) -> Tuple:
'''simple docstring'''
snake_case_ : Dict = vocab_size
snake_case_ : Tuple = max_position_embeddings
snake_case_ : Tuple = hidden_size
snake_case_ : int = num_layers
snake_case_ : List[str] = num_heads
snake_case_ : List[Any] = intermediate_size
snake_case_ : str = window_size
snake_case_ : List[Any] = activation_function
snake_case_ : List[Any] = resid_dropout
snake_case_ : Union[str, Any] = embed_dropout
snake_case_ : List[str] = attention_dropout
snake_case_ : List[Any] = classifier_dropout
snake_case_ : List[Any] = layer_norm_epsilon
snake_case_ : Union[str, Any] = initializer_range
snake_case_ : Optional[int] = use_cache
snake_case_ : int = bos_token_id
snake_case_ : Dict = eos_token_id
snake_case_ : Any = attention_types
snake_case_ : Union[str, Any] = self.expand_attention_types_params(lowerCAmelCase__ )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
"Configuration for convolutional module is incorrect. "
"It is required that `len(config.attention_layers)` == `config.num_layers` "
F'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, '''
F'''`config.num_layers = {self.num_layers}`. '''
"`config.attention_layers` is prepared using `config.attention_types`. "
"Please verify the value of `config.attention_types` argument." )
super().__init__(bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
@staticmethod
def _A ( lowerCAmelCase__ :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : int = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ )-> Union[str, Any]:
"""simple docstring"""
import torch
snake_case_ : Optional[Any] = input.size()
snake_case_ : Optional[int] = len(__magic_name__ )
snake_case_ : List[Any] = shape[dimension]
snake_case_ : List[Any] = torch.arange(0 ,__magic_name__ ,__magic_name__ )
snake_case_ : str = torch.div(sizedim - size ,__magic_name__ ,rounding_mode="floor" ) + 1
snake_case_ : Dict = torch.arange(__magic_name__ ) + low_indices[:min_length][:, None]
snake_case_ : List[str] = [slice(__magic_name__ )] * rank
snake_case_ : List[str] = indices
snake_case_ : Optional[Any] = input[s]
snake_case_ : List[str] = list(range(0 ,rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(__magic_name__ )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> int:
"""simple docstring"""
import torch
snake_case_ : List[Any] = torch.arange(1 ,__magic_name__ )
snake_case_ : List[str] = torch.remainder(__magic_name__ ,__magic_name__ )
snake_case_ : Optional[int] = remainders == 0
snake_case_ : Union[str, Any] = candidates[divisor_indices]
snake_case_ : Any = torch.max(__magic_name__ )
return largest_divisor, torch.div(__magic_name__ ,__magic_name__ ,rounding_mode="floor" )
class A_ (a_ ):
"""simple docstring"""
@property
def _A ( self :Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
snake_case_ : str = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase__ , direction="inputs" )
snake_case_ : str = {0: "batch", 1: "past_sequence + sequence"}
else:
snake_case_ : Optional[Any] = {0: "batch", 1: "sequence"}
return common_inputs
@property
def _A ( self :Optional[Any] ) -> int:
'''simple docstring'''
return self._config.num_heads
def _A ( self :Optional[int] , lowerCAmelCase__ :PreTrainedTokenizer , lowerCAmelCase__ :int = -1 , lowerCAmelCase__ :int = -1 , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
snake_case_ : Optional[Any] = super(lowerCAmelCase__ , self ).generate_dummy_inputs(
lowerCAmelCase__ , batch_size=lowerCAmelCase__ , seq_length=lowerCAmelCase__ , is_pair=lowerCAmelCase__ , framework=lowerCAmelCase__ )
# We need to order the input in the way they appears in the forward()
snake_case_ : Union[str, Any] = OrderedDict({"input_ids": common_inputs["input_ids"]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
snake_case_, snake_case_ : int = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
snake_case_ : List[str] = seqlen + 2
snake_case_ : int = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
snake_case_ : Union[str, Any] = [
(torch.zeros(lowerCAmelCase__ ), torch.zeros(lowerCAmelCase__ )) for _ in range(self.num_layers )
]
snake_case_ : List[str] = common_inputs["attention_mask"]
if self.use_past:
snake_case_ : Any = ordered_inputs["attention_mask"].dtype
snake_case_ : str = torch.cat(
[ordered_inputs["attention_mask"], torch.ones(lowerCAmelCase__ , lowerCAmelCase__ , dtype=lowerCAmelCase__ )] , dim=1 )
return ordered_inputs
@property
def _A ( self :Any ) -> int:
'''simple docstring'''
return 13
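
# How `attention_types` expands into the per-layer pattern validated above:
# [[["global", "local"], 12]] repeats the ["global", "local"] pair 12 times,
# giving 24 entries that must equal `num_layers`. A plain-Python sketch:
def expand_attention_types(attention_types):
    attentions = []
    for pattern, repeats in attention_types:
        for _ in range(repeats):
            attentions.extend(pattern)
    return attentions


assert expand_attention_types([[["global", "local"], 12]]) == ["global", "local"] * 12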
| 656 |
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class A_ (unittest.TestCase ):
"""simple docstring"""
def _A ( self :str ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = torch.nn.Linear(10 , 10 )
snake_case_ : Dict = torch.optim.SGD(model.parameters() , 0.1 )
snake_case_ : Tuple = Accelerator()
snake_case_ : Optional[Any] = accelerator.prepare(lowerCAmelCase__ )
try:
pickle.loads(pickle.dumps(lowerCAmelCase__ ) )
except Exception as e:
self.fail(F'''Accelerated optimizer pickling failed with {e}''' )
AcceleratorState._reset_state()
| 656 | 1 |
'''simple docstring'''
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ )-> bool:
"""simple docstring"""
if graph[path[curr_ind - 1]][next_ver] == 0:
return False
# 2. Validate that next vertex is not already in path
return not any(vertex == next_ver for vertex in path )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> bool:
"""simple docstring"""
if curr_ind == len(__magic_name__ ):
# return whether path exists between current and starting vertices
return graph[path[curr_ind - 1]][path[0]] == 1
# Recursive Step
for next_ver in range(0 ,len(__magic_name__ ) ):
if valid_connection(__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ):
# Insert current vertex into path as next transition
snake_case_ : int = next_ver
# Validate created path
if util_hamilton_cycle(__magic_name__ ,__magic_name__ ,curr_ind + 1 ):
return True
# Backtrack
snake_case_ : Union[str, Any] = -1
return False
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ = 0 )-> list[int]:
"""simple docstring"""
snake_case_ : List[str] = [-1] * (len(__magic_name__ ) + 1)
# initialize start and end of path with starting index
snake_case_ : str = start_index
# evaluate and if we find answer return path either return empty array
return path if util_hamilton_cycle(__magic_name__ ,__magic_name__ ,1 ) else []
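
# Usage sketch with a hypothetical `hamilton_cycle` entry point for the
# backtracking search above. In this 5-vertex adjacency matrix the cycle
# 0 -> 1 -> 2 -> 4 -> 3 -> 0 exists, so the search returns it.
graph = [
    [0, 1, 0, 1, 0],
    [1, 0, 1, 1, 1],
    [0, 1, 0, 0, 1],
    [1, 1, 0, 0, 1],
    [0, 1, 1, 1, 0],
]
# hamilton_cycle(graph) -> [0, 1, 2, 4, 3, 0]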
| 656 |
'''simple docstring'''
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__lowerCamelCase : Any = '''src/transformers'''
# This is to make sure the transformers module imported is the one in the repo.
__lowerCamelCase : List[str] = direct_transformers_import(PATH_TO_TRANSFORMERS)
__lowerCamelCase : Optional[Any] = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
__lowerCamelCase : Union[str, Any] = re.compile(R'''\[(.+?)\]\((https://huggingface\.co/.+?)\)''')
__lowerCamelCase : Any = {
'''DecisionTransformerConfig''',
'''EncoderDecoderConfig''',
'''MusicgenConfig''',
'''RagConfig''',
'''SpeechEncoderDecoderConfig''',
'''TimmBackboneConfig''',
'''VisionEncoderDecoderConfig''',
'''VisionTextDualEncoderConfig''',
'''LlamaConfig''',
}
def __UpperCAmelCase ( __magic_name__ )-> List[Any]:
"""simple docstring"""
snake_case_ : Tuple = None
# source code of `config_class`
snake_case_ : List[Any] = inspect.getsource(__magic_name__ )
snake_case_ : List[str] = _re_checkpoint.findall(__magic_name__ )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith("/" ):
snake_case_ : Optional[Any] = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
snake_case_ : str = F'''https://huggingface.co/{ckpt_name}'''
if ckpt_link == ckpt_link_from_name:
snake_case_ : Dict = ckpt_name
break
return checkpoint
def __UpperCAmelCase ( )-> Dict:
"""simple docstring"""
snake_case_ : Optional[int] = []
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
snake_case_ : str = get_checkpoint_from_config_class(__magic_name__ )
snake_case_ : Union[str, Any] = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(__magic_name__ )
if len(__magic_name__ ) > 0:
snake_case_ : Tuple = "\n".join(sorted(__magic_name__ ) )
raise ValueError(F'''The following configurations don\'t contain any valid checkpoint:\n{message}''' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
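
# A quick demonstration of the checkpoint-extraction regex used above on a
# docstring fragment; it captures (name, link) pairs from markdown links.
import re

pattern = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
doc = "Instantiate like [bert-base-uncased](https://huggingface.co/bert-base-uncased)."
print(pattern.findall(doc))  # [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]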
| 656 | 1 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class A_ (a_ ):
"""simple docstring"""
a__ = 42
a__ = 42
def __init__( self :int , lowerCAmelCase__ :UNetaDModel , lowerCAmelCase__ :KarrasVeScheduler ) -> str:
'''simple docstring'''
super().__init__()
self.register_modules(unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ )
@torch.no_grad()
def __call__( self :List[str] , lowerCAmelCase__ :int = 1 , lowerCAmelCase__ :int = 50 , lowerCAmelCase__ :Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCAmelCase__ :Optional[str] = "pil" , lowerCAmelCase__ :bool = True , **lowerCAmelCase__ :Optional[Any] , ) -> Union[Tuple, ImagePipelineOutput]:
'''simple docstring'''
snake_case_ : Optional[Any] = self.unet.config.sample_size
snake_case_ : Optional[int] = (batch_size, 3, img_size, img_size)
snake_case_ : str = self.unet
# sample x_0 ~ N(0, sigma_0^2 * I)
snake_case_ : str = randn_tensor(lowerCAmelCase__ , generator=lowerCAmelCase__ , device=self.device ) * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(lowerCAmelCase__ )
for t in self.progress_bar(self.scheduler.timesteps ):
# here sigma_t == t_i from the paper
snake_case_ : List[str] = self.scheduler.schedule[t]
snake_case_ : Union[str, Any] = self.scheduler.schedule[t - 1] if t > 0 else 0
# 1. Select temporarily increased noise level sigma_hat
# 2. Add new noise to move from sample_i to sample_hat
snake_case_, snake_case_ : Union[str, Any] = self.scheduler.add_noise_to_input(lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ )
# 3. Predict the noise residual given the noise magnitude `sigma_hat`
# The model inputs and output are adjusted by following eq. (213) in [1].
snake_case_ : List[Any] = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample
# 4. Evaluate dx/dt at sigma_hat
# 5. Take Euler step from sigma to sigma_prev
snake_case_ : str = self.scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
if sigma_prev != 0:
# 6. Apply 2nd order correction
# The model inputs and output are adjusted by following eq. (213) in [1].
snake_case_ : str = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample
snake_case_ : List[str] = self.scheduler.step_correct(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , step_output.prev_sample , step_output["derivative"] , )
snake_case_ : Tuple = step_output.prev_sample
snake_case_ : Dict = (sample / 2 + 0.5).clamp(0 , 1 )
snake_case_ : Optional[Any] = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
snake_case_ : Optional[int] = self.numpy_to_pil(lowerCAmelCase__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCAmelCase__ )
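
# A hedged NumPy sketch of one sampler step from the loop above (Euler step
# plus the second-order correction). `denoise` stands in for the UNet call
# and is purely illustrative; shapes and scalings are simplified.
import numpy as np


def karras_step(sample_hat, sigma_hat, sigma_prev, denoise):
    # Euler step from sigma_hat down to sigma_prev
    derivative = (sample_hat - denoise(sample_hat, sigma_hat)) / sigma_hat
    sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
    if sigma_prev != 0:
        # Heun-style correction: average the slopes at both noise levels
        derivative_corr = (sample_prev - denoise(sample_prev, sigma_prev)) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * 0.5 * (derivative + derivative_corr)
    return sample_prev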
| 656 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : List[str] = logging.get_logger(__name__)
__lowerCamelCase : int = {
'''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class A_ (a_ ):
"""simple docstring"""
a__ = '''cvt'''
def __init__( self :List[Any] , lowerCAmelCase__ :Optional[int]=3 , lowerCAmelCase__ :Any=[7, 3, 3] , lowerCAmelCase__ :Dict=[4, 2, 2] , lowerCAmelCase__ :Union[str, Any]=[2, 1, 1] , lowerCAmelCase__ :Any=[64, 192, 384] , lowerCAmelCase__ :List[str]=[1, 3, 6] , lowerCAmelCase__ :str=[1, 2, 10] , lowerCAmelCase__ :Any=[4.0, 4.0, 4.0] , lowerCAmelCase__ :int=[0.0, 0.0, 0.0] , lowerCAmelCase__ :Optional[Any]=[0.0, 0.0, 0.0] , lowerCAmelCase__ :Dict=[0.0, 0.0, 0.1] , lowerCAmelCase__ :List[Any]=[True, True, True] , lowerCAmelCase__ :List[Any]=[False, False, True] , lowerCAmelCase__ :Dict=["dw_bn", "dw_bn", "dw_bn"] , lowerCAmelCase__ :Any=[3, 3, 3] , lowerCAmelCase__ :Tuple=[1, 1, 1] , lowerCAmelCase__ :Optional[int]=[2, 2, 2] , lowerCAmelCase__ :Union[str, Any]=[1, 1, 1] , lowerCAmelCase__ :Any=[1, 1, 1] , lowerCAmelCase__ :List[str]=0.0_2 , lowerCAmelCase__ :Dict=1E-1_2 , **lowerCAmelCase__ :Optional[Any] , ) -> str:
'''simple docstring'''
super().__init__(**lowerCAmelCase__ )
snake_case_ : int = num_channels
snake_case_ : int = patch_sizes
snake_case_ : Optional[Any] = patch_stride
snake_case_ : Dict = patch_padding
snake_case_ : Tuple = embed_dim
snake_case_ : Optional[int] = num_heads
snake_case_ : Union[str, Any] = depth
snake_case_ : Optional[int] = mlp_ratio
snake_case_ : Tuple = attention_drop_rate
snake_case_ : str = drop_rate
snake_case_ : Tuple = drop_path_rate
snake_case_ : Any = qkv_bias
snake_case_ : Union[str, Any] = cls_token
snake_case_ : int = qkv_projection_method
snake_case_ : Any = kernel_qkv
snake_case_ : Union[str, Any] = padding_kv
snake_case_ : str = stride_kv
snake_case_ : Dict = padding_q
snake_case_ : Tuple = stride_q
snake_case_ : Any = initializer_range
snake_case_ : Any = layer_norm_eps
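
# The per-stage hyperparameters above are parallel lists (one entry per
# stage); a quick sanity check one might run on such values -- the field
# names here only mirror the constructor arguments:
stage_lists = {
    "patch_sizes": [7, 3, 3],
    "embed_dim": [64, 192, 384],
    "num_heads": [1, 3, 6],
    "depth": [1, 2, 10],
}
assert len({len(v) for v in stage_lists.values()}) == 1  # all describe 3 stages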
| 656 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__lowerCamelCase : Any = {'''configuration_glpn''': ['''GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GLPNConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : List[Any] = ['''GLPNFeatureExtractor''']
__lowerCamelCase : Any = ['''GLPNImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Optional[Any] = [
'''GLPN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GLPNForDepthEstimation''',
'''GLPNLayer''',
'''GLPNModel''',
'''GLPNPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
__lowerCamelCase : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
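
# A minimal sketch of the lazy-import pattern above: attribute access
# triggers the real import, so heavy backends load only when first used.
# `transformers._LazyModule` is more elaborate; this is illustrative only.
import importlib
import types


class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr):
        submodule = importlib.import_module("." + self._class_to_module[attr], self.__name__)
        return getattr(submodule, attr)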
| 656 |
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
__lowerCamelCase : str = '''\
@inproceedings{snover-etal-2006-study,
title = "A Study of Translation Edit Rate with Targeted Human Annotation",
author = "Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John",
booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
month = aug # " 8-12",
year = "2006",
address = "Cambridge, Massachusetts, USA",
publisher = "Association for Machine Translation in the Americas",
url = "https://aclanthology.org/2006.amta-papers.25",
pages = "223--231",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
__lowerCamelCase : Dict = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The lengths of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''
__lowerCamelCase : int = '''
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
    ignore_punct (boolean): If `True`, removes punctuation before scoring. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
\'score\' (float): TER score (num_edits / sum_ref_lengths * 100)
\'num_edits\' (int): The cumulative number of edits
\'ref_length\' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}
Example 2:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}
Example 3:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}
Example 4:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}
Example 5:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ (datasets.Metric ):
"""simple docstring"""
def _A ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
if version.parse(scb.__version__ ) < version.parse("1.4.12" ):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
"You can install it with `pip install \"sacrebleu>=1.4.12\"`." )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="http://www.cs.umd.edu/~snover/tercom/" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"] , reference_urls=[
"https://github.com/jhclark/tercom",
] , )
def _A ( self :Any , lowerCAmelCase__ :Any , lowerCAmelCase__ :str , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :bool = False , ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : List[str] = len(references[0] )
if any(len(lowerCAmelCase__ ) != references_per_prediction for refs in references ):
raise ValueError("Sacrebleu requires the same number of references for each prediction" )
snake_case_ : List[str] = [[refs[i] for refs in references] for i in range(lowerCAmelCase__ )]
snake_case_ : List[str] = TER(
normalized=lowerCAmelCase__ , no_punct=lowerCAmelCase__ , asian_support=lowerCAmelCase__ , case_sensitive=lowerCAmelCase__ , )
snake_case_ : Any = sb_ter.corpus_score(lowerCAmelCase__ , lowerCAmelCase__ )
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 656 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
__lowerCamelCase : Any = None
__lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
__lowerCamelCase : Tuple = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
__lowerCamelCase : Optional[Any] = {
'''vocab_file''': {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
),
},
'''tokenizer_file''': {
'''google/bigbird-roberta-base''': (
'''https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'''
),
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'''
),
},
}
__lowerCamelCase : Any = {
'''google/bigbird-roberta-base''': 4096,
'''google/bigbird-roberta-large''': 4096,
'''google/bigbird-base-trivia-itc''': 4096,
}
__lowerCamelCase : Optional[Any] = '''▁'''
class A_ (a_ ):
"""simple docstring"""
a__ = VOCAB_FILES_NAMES
a__ = PRETRAINED_VOCAB_FILES_MAP
a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ = BigBirdTokenizer
a__ = ['''input_ids''', '''attention_mask''']
a__ = []
def __init__( self :int , lowerCAmelCase__ :Dict=None , lowerCAmelCase__ :int=None , lowerCAmelCase__ :List[Any]="<unk>" , lowerCAmelCase__ :Any="<s>" , lowerCAmelCase__ :Union[str, Any]="</s>" , lowerCAmelCase__ :int="<pad>" , lowerCAmelCase__ :Dict="[SEP]" , lowerCAmelCase__ :Dict="[MASK]" , lowerCAmelCase__ :Any="[CLS]" , **lowerCAmelCase__ :Optional[int] , ) -> List[str]:
'''simple docstring'''
snake_case_ : List[Any] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else bos_token
snake_case_ : Dict = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else eos_token
snake_case_ : Any = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else unk_token
snake_case_ : int = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else pad_token
snake_case_ : Optional[int] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else cls_token
snake_case_ : List[str] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
snake_case_ : Optional[int] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token
super().__init__(
lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , **lowerCAmelCase__ , )
snake_case_ : Optional[int] = vocab_file
snake_case_ : int = False if not self.vocab_file else True
def _A ( self :int , lowerCAmelCase__ :List[int] , lowerCAmelCase__ :Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
snake_case_ : Optional[int] = [self.sep_token_id]
snake_case_ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _A ( self :Optional[Any] , lowerCAmelCase__ :List[int] , lowerCAmelCase__ :Optional[List[int]] = None , lowerCAmelCase__ :bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase__ )) + [1]
return [1] + ([0] * len(lowerCAmelCase__ )) + [1] + ([0] * len(lowerCAmelCase__ )) + [1]
def _A ( self :Union[str, Any] , lowerCAmelCase__ :List[int] , lowerCAmelCase__ :Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
snake_case_ : Optional[int] = [self.sep_token_id]
snake_case_ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _A ( self :Optional[Any] , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case_ : Any = os.path.join(
lowerCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ):
copyfile(self.vocab_file , lowerCAmelCase__ )
return (out_vocab_file,)
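
# Shape of the special-tokens mask built above, shown on toy ids: 1 marks
# the [CLS]/[SEP] positions, 0 marks ordinary tokens.
token_ids_a = [11, 12, 13]
token_ids_b = [21, 22]
mask = [1] + ([0] * len(token_ids_a)) + [1] + ([0] * len(token_ids_b)) + [1]
print(mask)  # [1, 0, 0, 0, 1, 0, 0, 1]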
| 656 |
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def __UpperCAmelCase ( )-> int:
"""simple docstring"""
snake_case_ : Any = {
"repo_name": ["test_repo1", "test_repo2", "test_repo3"],
"path": ["test_1.py", "test_2.py", "unit_test.py"],
"content": ["a " * 20, "a " * 30, "b " * 7],
}
snake_case_ : int = Dataset.from_dict(__magic_name__ )
return dataset
class A_ (a_ ):
"""simple docstring"""
def _A ( self :List[str] ) -> str:
'''simple docstring'''
snake_case_ : Union[str, Any] = get_dataset()
snake_case_ : Optional[int] = make_duplicate_clusters(lowerCAmelCase__ , 0.8_5 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def _A ( self :Union[str, Any] ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[int] = get_dataset()
snake_case_, snake_case_ : List[Any] = deduplicate_dataset(lowerCAmelCase__ )
self.assertEqual(len(lowerCAmelCase__ ) , 2 )
print(lowerCAmelCase__ )
self.assertEqual(duplicate_clusters[0][0]["copies"] , 2 )
self.assertEqual(duplicate_clusters[0][0]["is_extreme"] , lowerCAmelCase__ )
| 656 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> tuple:
"""simple docstring"""
snake_case_ : Optional[Any] = namedtuple("result" ,"name value" )
if (voltage, current, power).count(0 ) != 1:
raise ValueError("Only one argument must be 0" )
elif power < 0:
raise ValueError(
"Power cannot be negative in any electrical/electronics system" )
elif voltage == 0:
return result("voltage" ,power / current )
elif current == 0:
return result("current" ,power / voltage )
elif power == 0:
return result("power" ,float(round(abs(voltage * current ) ,2 ) ) )
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
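
# Usage sketch, with a hypothetical `electric_power` name and keyword
# signature for the function above: exactly one of voltage/current/power
# must be 0, and the missing quantity is solved from P = V * I.
# electric_power(voltage=0, current=2, power=5)  -> result(name='voltage', value=2.5)
# electric_power(voltage=2, current=2, power=0)  -> result(name='power', value=4.0)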
| 656 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
__lowerCamelCase : Dict = {
'''configuration_gpt_neox_japanese''': ['''GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXJapaneseConfig'''],
'''tokenization_gpt_neox_japanese''': ['''GPTNeoXJapaneseTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : int = [
'''GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXJapaneseForCausalLM''',
'''GPTNeoXJapaneseLayer''',
'''GPTNeoXJapaneseModel''',
'''GPTNeoXJapanesePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
__lowerCamelCase : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 656 | 1 |
'''simple docstring'''
from __future__ import annotations
from random import choice
def __UpperCAmelCase ( __magic_name__ )-> Dict:
"""simple docstring"""
return choice(__magic_name__ )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> int:
"""simple docstring"""
snake_case_ : Optional[int] = random_pivot(__magic_name__ )
# partition based on pivot
# linear time
snake_case_ : Union[str, Any] = [e for e in lst if e < pivot]
snake_case_ : Union[str, Any] = [e for e in lst if e > pivot]
# if we get lucky, pivot might be the element we want.
# we can easily see this:
    # small (elements smaller than the pivot)
    # + pivot (the kth element when len(small) == k - 1)
    # + big (elements larger than the pivot)
if len(__magic_name__ ) == k - 1:
return pivot
    # the kth element is among the elements bigger than the pivot
elif len(__magic_name__ ) < k - 1:
return kth_number(__magic_name__ ,k - len(__magic_name__ ) - 1 )
    # the kth element is among the elements smaller than the pivot
else:
return kth_number(__magic_name__ ,__magic_name__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
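
# Usage sketch with a hypothetical `kth_number` name for the randomized
# quickselect above (k is 1-indexed; values equal to the pivot are dropped
# by the partition, so distinct elements are assumed):
# kth_number([2, 1, 3, 4, 5], k=3)  -> 3   (the median)
# kth_number([10, 2, 5, 3], k=2)    -> 3   (second smallest)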
| 656 |
'''simple docstring'''
def __UpperCAmelCase ( __magic_name__ )-> list[int]:
"""simple docstring"""
    if not isinstance(__magic_name__ ,__magic_name__ ) or length <= 0:
raise ValueError("Length must be a positive integer." )
return [n * (2 * n - 1) for n in range(__magic_name__ )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
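
# The closed form above is h(n) = n * (2n - 1); spot-checking the first
# five terms printed by the demo: 0, 1, 6, 15, 28.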
| 656 | 1 |
'''simple docstring'''
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class A_ (pl.LightningModule ):
"""simple docstring"""
def __init__( self :Union[str, Any] , lowerCAmelCase__ :List[Any] ) -> Dict:
'''simple docstring'''
super().__init__()
snake_case_ : List[Any] = model
snake_case_ : Optional[Any] = 2
snake_case_ : List[Any] = nn.Linear(self.model.config.hidden_size , self.num_labels )
def _A ( self :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
pass
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> Dict:
"""simple docstring"""
snake_case_ : str = LongformerModel.from_pretrained(__magic_name__ )
snake_case_ : Union[str, Any] = LightningModel(__magic_name__ )
snake_case_ : str = torch.load(__magic_name__ ,map_location=torch.device("cpu" ) )
lightning_model.load_state_dict(ckpt["state_dict"] )
# init longformer question answering model
snake_case_ : int = LongformerForQuestionAnswering.from_pretrained(__magic_name__ )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(__magic_name__ )
print(F'''Conversion successful. Model saved under {pytorch_dump_folder_path}''' )
if __name__ == "__main__":
__lowerCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--longformer_model''',
default=None,
type=str,
required=True,
help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
)
parser.add_argument(
'''--longformer_question_answering_ckpt_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch Lightning Checkpoint.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__lowerCamelCase : Optional[int] = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
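
# Example invocation (script name and checkpoint paths are hypothetical):
# python convert_longformer_original_pytorch_lightning_to_pytorch.py \
#     --longformer_model longformer-base-4096 \
#     --longformer_question_answering_ckpt_path ./checkpoints/epoch=4.ckpt \
#     --pytorch_dump_folder_path ./longformer-base-4096-finetuned-qa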
| 656 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def __UpperCAmelCase ( __magic_name__=None )-> List[str]:
"""simple docstring"""
if subparsers is not None:
snake_case_ : List[str] = subparsers.add_parser("test" )
else:
snake_case_ : List[Any] = argparse.ArgumentParser("Accelerate test command" )
parser.add_argument(
"--config_file" ,default=__magic_name__ ,help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) ,)
if subparsers is not None:
parser.set_defaults(func=__magic_name__ )
return parser
def __UpperCAmelCase ( __magic_name__ )-> Tuple:
"""simple docstring"""
snake_case_ : Optional[Any] = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["test_utils", "scripts", "test_script.py"] )
if args.config_file is None:
snake_case_ : str = script_name
else:
snake_case_ : Any = F'''--config_file={args.config_file} {script_name}'''
snake_case_ : Union[str, Any] = ["accelerate-launch"] + test_args.split()
snake_case_ : Optional[int] = execute_subprocess_async(__magic_name__ ,env=os.environ.copy() )
if result.returncode == 0:
print("Test is a success! You are ready for your distributed training!" )
def __UpperCAmelCase ( )-> int:
"""simple docstring"""
snake_case_ : Dict = test_command_parser()
snake_case_ : Dict = parser.parse_args()
test_command(__magic_name__ )
if __name__ == "__main__":
main()
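
# Example invocations of the resulting subcommand (the config path shown is
# only the documented default location; adjust as needed):
# accelerate test
# accelerate test --config_file ~/.cache/huggingface/accelerate/default_config.yaml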
| 656 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCamelCase : str = {
'''configuration_whisper''': ['''WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WhisperConfig''', '''WhisperOnnxConfig'''],
'''feature_extraction_whisper''': ['''WhisperFeatureExtractor'''],
'''processing_whisper''': ['''WhisperProcessor'''],
'''tokenization_whisper''': ['''WhisperTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Union[str, Any] = ['''WhisperTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Any = [
'''WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WhisperForConditionalGeneration''',
'''WhisperModel''',
'''WhisperPreTrainedModel''',
'''WhisperForAudioClassification''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Union[str, Any] = [
'''TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWhisperForConditionalGeneration''',
'''TFWhisperModel''',
'''TFWhisperPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Any = [
'''FlaxWhisperForConditionalGeneration''',
'''FlaxWhisperModel''',
'''FlaxWhisperPreTrainedModel''',
'''FlaxWhisperForAudioClassification''',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
__lowerCamelCase : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 656 |
'''simple docstring'''
from scipy.stats import spearmanr
import datasets
__lowerCamelCase : str = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
__lowerCamelCase : int = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
    p-value (`float`): p-value. **Note**: only returned if `return_pvalue=True` is passed.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
__lowerCamelCase : List[str] = R'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ (datasets.Metric ):
"""simple docstring"""
def _A ( self :str ) -> Union[str, Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
} ) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"] , )
def _A ( self :Optional[int] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[Any]=False ) -> List[Any]:
'''simple docstring'''
snake_case_ : Optional[Any] = spearmanr(lowerCAmelCase__ , lowerCAmelCase__ )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 656 | 1 |
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
__lowerCamelCase : str = 3
def __UpperCAmelCase ( __magic_name__ )-> int:
"""simple docstring"""
print("Generating primitive root of p" )
while True:
snake_case_ : int = random.randrange(3 ,__magic_name__ )
if pow(__magic_name__ ,2 ,__magic_name__ ) == 1:
continue
if pow(__magic_name__ ,__magic_name__ ,__magic_name__ ) == 1:
continue
return g
def __UpperCAmelCase ( __magic_name__ )-> tuple[tuple[int, int, int, int], tuple[int, int]]:
"""simple docstring"""
print("Generating prime p..." )
snake_case_ : str = rabin_miller.generate_large_prime(__magic_name__ ) # select large prime number.
snake_case_ : Dict = primitive_root(__magic_name__ ) # one primitive root on modulo p.
snake_case_ : Optional[int] = random.randrange(3 ,__magic_name__ ) # private_key -> have to be greater than 2 for safety.
snake_case_ : Union[str, Any] = cryptomath.find_mod_inverse(pow(__magic_name__ ,__magic_name__ ,__magic_name__ ) ,__magic_name__ )
snake_case_ : Optional[Any] = (key_size, e_a, e_a, p)
snake_case_ : Union[str, Any] = (key_size, d)
return public_key, private_key
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> None:
"""simple docstring"""
if os.path.exists(F'''{name}_pubkey.txt''' ) or os.path.exists(F'''{name}_privkey.txt''' ):
print("\nWARNING:" )
print(
F'''"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'''
"Use a different name or delete these files and re-run this program." )
sys.exit()
snake_case_, snake_case_ : Union[str, Any] = generate_key(__magic_name__ )
print(F'''\nWriting public key to file {name}_pubkey.txt...''' )
with open(F'''{name}_pubkey.txt''' ,"w" ) as fo:
fo.write(F'''{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}''' )
print(F'''Writing private key to file {name}_privkey.txt...''' )
with open(F'''{name}_privkey.txt''' ,"w" ) as fo:
fo.write(F'''{private_key[0]},{private_key[1]}''' )
def __UpperCAmelCase ( )-> None:
"""simple docstring"""
print("Making key files..." )
make_key_files("elgamal" ,2048 )
print("Key files generation successful" )
if __name__ == "__main__":
main()
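
# `cryptomath.find_mod_inverse` above computes a modular inverse a^{-1} mod m;
# since Python 3.8 the built-in pow covers the same operation -- a quick check:
# pow(7, -1, 26) == 15 and (7 * 15) % 26 == 1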
| 656 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
__lowerCamelCase : Optional[Any] = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
__lowerCamelCase : str = 128022
__lowerCamelCase : List[Any] = 128028
@require_sentencepiece
class A_ (a_ , unittest.TestCase ):
"""simple docstring"""
a__ = MaMaaaTokenizer
a__ = False
a__ = False
a__ = True
def _A ( self :Union[str, Any] ) -> List[str]:
'''simple docstring'''
super().setUp()
snake_case_ : int = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
snake_case_ : Any = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
snake_case_ : Optional[int] = Path(self.tmpdirname )
save_json(lowerCAmelCase__ , save_dir / VOCAB_FILES_NAMES["vocab_file"] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(lowerCAmelCase__ , save_dir / VOCAB_FILES_NAMES["spm_file"] )
snake_case_ : Union[str, Any] = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def _A ( self :List[Any] , **lowerCAmelCase__ :List[Any] ) -> str:
'''simple docstring'''
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def _A ( self :Optional[int] , lowerCAmelCase__ :Any ) -> Optional[int]:
'''simple docstring'''
return (
"This is a test",
"This is a test",
)
def _A ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : str = "</s>"
snake_case_ : Union[str, Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__ ) , lowerCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__ ) , lowerCAmelCase__ )
def _A ( self :Union[str, Any] ) -> List[str]:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.get_tokenizer()
snake_case_ : Any = list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "</s>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "<s>" )
self.assertEqual(len(lowerCAmelCase__ ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip("Skip this test while all models are still to be uploaded." )
def _A ( self :List[Any] ) -> Union[str, Any]:
'''simple docstring'''
pass
def _A ( self :Optional[int] ) -> int:
'''simple docstring'''
snake_case_ : int = self.get_tokenizer()
snake_case_ : List[str] = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowerCAmelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [2, 3, 4, 5, 6] , )
snake_case_ : Any = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(lowerCAmelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
snake_case_ : Any = tokenizer.convert_tokens_to_string(lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , "This is a test" )
@slow
def _A ( self :Any ) -> List[Any]:
'''simple docstring'''
snake_case_ : int = {"input_ids": [[128_022, 110_108, 397, 11, 38_272, 2_247, 124_811, 285, 18_105, 1_586, 207, 7, 39_534, 4_428, 397, 1_019, 18_105, 1_586, 207, 7, 41_337, 16_786, 241, 7, 20_214, 17, 125_690, 10_398, 7, 44_378, 58_069, 68_342, 7_798, 7_343, 11, 299, 33_310, 4, 158, 37_350, 94_077, 4_569, 299, 33_310, 90, 4, 52_840, 290, 4, 31_270, 112, 299, 682, 4, 52_840, 39_953, 14_079, 193, 52_519, 90_894, 17_894, 120_697, 11, 40_445, 551, 17, 1_019, 52_519, 90_894, 17_756, 963, 11, 40_445, 480, 17, 9_792, 1_120, 5_173, 1_393, 6_240, 16_786, 241, 120_996, 28, 1_245, 1_393, 118_240, 11_123, 1_019, 93_612, 2_691, 10_618, 98_058, 120_409, 1_928, 279, 4, 40_683, 367, 178, 207, 1_019, 103, 103_121, 506, 65_296, 5, 2], [128_022, 21_217, 367, 117, 125_450, 128, 719, 7, 7_308, 40, 93_612, 12_669, 1_116, 16_704, 71, 17_785, 3_699, 15_592, 35, 144, 9_584, 241, 11_943, 713, 950, 799, 2_247, 88_427, 150, 149, 118_813, 120_706, 1_019, 106_906, 81_518, 28, 1_224, 22_799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128_022, 1_658, 123_311, 5_155, 5_578, 4_722, 279, 14_947, 2_366, 1_120, 1_197, 14, 1_348, 9_232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__ , model_name="facebook/m2m100_418M" , revision="c168bae485c864188cf9aa0e4108b0b6934dc91e" , )
@require_torch
@require_sentencepiece
@require_tokenizers
class A_ (unittest.TestCase ):
"""simple docstring"""
    checkpoint_name = '''facebook/m2m100_418M'''
    src_text = [
        '''In my opinion, there are two levels of response from the French government.''',
        '''NSA Affair Emphasizes Complete Lack of Debate on Intelligence''',
    ]
    tgt_text = [
        '''Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.''',
        '''L\'affaire NSA souligne l\'absence totale de débat sur le renseignement''',
    ]
    # fmt: off
    expected_src_tokens = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
    # fmt: on
@classmethod
    def setUpClass ( cls ) -> int:
        '''simple docstring'''
        cls.tokenizer : MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang="en" , tgt_lang="fr" )
        cls.pad_token_id = 1
return cls
def _A ( self :Tuple ) -> Union[str, Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.get_lang_id("ar" ) , 128_006 )
self.assertEqual(self.tokenizer.get_lang_id("en" ) , 128_022 )
self.assertEqual(self.tokenizer.get_lang_id("ro" ) , 128_076 )
self.assertEqual(self.tokenizer.get_lang_id("mr" ) , 128_063 )
def _A ( self :Optional[int] ) -> List[str]:
'''simple docstring'''
        vocab : Dict = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab ) , self.tokenizer.vocab_size )
        self.assertEqual(vocab["<unk>"] , 3 )
        self.assertIn(self.tokenizer.get_lang_token("en" ) , vocab )
def _A ( self :Any ) -> Dict:
'''simple docstring'''
snake_case_ : List[str] = "en"
snake_case_ : Dict = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase__ )
def _A ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
        self.assertIn(FR_CODE , self.tokenizer.all_special_ids )
        # fmt: off
        generated_ids : Dict = [FR_CODE, 5_364, 82, 8_642, 4, 294, 47, 8, 14_028, 136, 3_286, 9_706, 6, 90_797, 6, 144_012, 162, 88_128, 30_061, 5, 2]
        # fmt: on
        result : List[str] = self.tokenizer.decode(generated_ids , skip_special_tokens=True )
        expected_french : str = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True )
        self.assertEqual(result , expected_french )
        self.assertNotIn(self.tokenizer.eos_token , result )
def _A ( self :Tuple ) -> Tuple:
'''simple docstring'''
        tmpdirname : Union[str, Any] = tempfile.mkdtemp()
        original_lang_token_to_id : int = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname )
        new_tok : List[str] = MaMaaaTokenizer.from_pretrained(tmpdirname )
        self.assertDictEqual(new_tok.lang_token_to_id , original_lang_token_to_id )
@require_torch
def _A ( self :Optional[Any] ) -> str:
'''simple docstring'''
snake_case_ : Union[str, Any] = "en"
snake_case_ : Tuple = "fr"
snake_case_ : Optional[int] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase__ , return_tensors="pt" )
snake_case_ : Dict = shift_tokens_right(
batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
for k in batch:
snake_case_ : str = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def _A ( self :Optional[Any] ) -> Tuple:
'''simple docstring'''
snake_case_ : List[str] = "mr"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
snake_case_ : int = "zh"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
def _A ( self :str ) -> int:
'''simple docstring'''
snake_case_ : Dict = "mr"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
snake_case_ : Tuple = "zh"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def _A ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
        inputs : Optional[int] = self.tokenizer._build_translation_inputs("A test" , return_tensors="pt" , src_lang="en" , tgt_lang="ar" )
        self.assertEqual(
            nested_simplify(inputs ) , {
# en_XX, A, test, EOS
"input_ids": [[128_022, 58, 4_183, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 128_006,
} , )
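# --- Editor's addition: a hedged usage sketch, separate from the test class above.
# It mirrors the translation round-trip the integration tests exercise. It assumes
# network access to download "facebook/m2m100_418M"; names prefixed with _demo are ours.
def _demo_m2m100_translation():
    from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

    tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
    model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
    batch = tokenizer("Hello world", return_tensors="pt")
    # force the French language code as the first generated token, as the tests assert
    generated = model.generate(**batch, forced_bos_token_id=tokenizer.get_lang_id("fr"))
    return tokenizer.batch_decode(generated, skip_special_tokens=True)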
| 656 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__lowerCamelCase : int = logging.get_logger(__name__)
class A_ (a_ ):
"""simple docstring"""
a__ = ['''pixel_values''']
    def __init__( self , do_resize :bool = True , size :Optional[Dict[str, int]] = None , resample :PILImageResampling = PILImageResampling.BICUBIC , do_center_crop :bool = True , do_rescale :bool = True , rescale_factor :Union[int, float] = 1 / 255 , crop_size :Dict[str, int] = None , do_normalize :bool = True , image_mean :Optional[Union[float, List[float]]] = None , image_std :Optional[Union[float, List[float]]] = None , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs )
        size : Union[str, Any] = size if size is not None else {"height": 224, "width": 224}
        size : str = get_size_dict(size )
        crop_size : List[str] = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size : int = get_size_dict(crop_size , default_to_square=True , param_name="crop_size" )
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize ( self , image :np.ndarray , size :Dict[str, int] , resample :PILImageResampling = PILImageResampling.BILINEAR , data_format :Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        '''simple docstring'''
        size : Optional[Any] = get_size_dict(size )
        if "shortest_edge" in size:
            size : int = get_resize_output_image_size(image , size=size["shortest_edge"] , default_to_square=False )
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            size : Union[str, Any] = (size["height"], size["width"])
        else:
            raise ValueError(F'''Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}''' )
        return resize(image , size=size , resample=resample , data_format=data_format , **kwargs )
    def center_crop ( self , image :np.ndarray , size :Dict[str, int] , data_format :Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        '''simple docstring'''
        size : List[Any] = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
        return center_crop(image , size=(size["height"], size["width"]) , data_format=data_format , **kwargs )
    def rescale ( self , image :np.ndarray , scale :float , data_format :Optional[Union[str, ChannelDimension]] = None , **kwargs ) -> np.ndarray:
        '''simple docstring'''
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize ( self , image :np.ndarray , mean :Union[float, List[float]] , std :Union[float, List[float]] , data_format :Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        '''simple docstring'''
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess ( self , images :ImageInput , do_resize :Optional[bool] = None , size :Dict[str, int] = None , resample :PILImageResampling = None , do_center_crop :bool = None , crop_size :Dict[str, int] = None , do_rescale :Optional[bool] = None , rescale_factor :Optional[float] = None , do_normalize :Optional[bool] = None , image_mean :Optional[Union[float, List[float]]] = None , image_std :Optional[Union[float, List[float]]] = None , return_tensors :Optional[Union[str, TensorType]] = None , data_format :Union[str, ChannelDimension] = ChannelDimension.FIRST , **kwargs , ) -> BatchFeature:
        '''simple docstring'''
        do_resize : int = do_resize if do_resize is not None else self.do_resize
        do_rescale : int = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize : Tuple = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop : Optional[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size : int = crop_size if crop_size is not None else self.crop_size
        crop_size : Any = get_size_dict(crop_size , param_name="crop_size" , default_to_square=True )
        resample : Optional[Any] = resample if resample is not None else self.resample
        rescale_factor : Optional[int] = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean : Optional[Any] = image_mean if image_mean is not None else self.image_mean
        image_std : Dict = image_std if image_std is not None else self.image_std
        size : Tuple = size if size is not None else self.size
        size : Tuple = get_size_dict(size )
        if not is_batched(images ):
            images : str = [images]
        if not valid_images(images ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        # All transformations expect numpy arrays.
        images : Tuple = [to_numpy_array(image ) for image in images]
        if do_resize:
            images : Any = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images : Dict = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images : List[str] = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images : int = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images : List[str] = [to_channel_dimension_format(image , data_format ) for image in images]
        data : str = {"pixel_values": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
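# --- Editor's addition: a minimal usage sketch of the processor class above
# (obfuscated here as `A_`). The random image and expected shape are assumptions;
# the call relies on the resize -> center-crop -> rescale -> normalize flow shown above.
def _demo_image_processor():
    demo_image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)
    processor = A_(size={"height": 224, "width": 224})
    features = processor.preprocess(images=demo_image, return_tensors="np")
    return features["pixel_values"].shape  # expected: (1, 3, 224, 224)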
| 656 |
'''simple docstring'''
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent that you should run this script from the root of the repo with the command
# python utils/check_tf_ops.py
REPO_PATH : str = '''.'''
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
'''Assert''',
'''AssignVariableOp''',
'''EmptyTensorList''',
'''MergeV2Checkpoints''',
'''ReadVariableOp''',
'''ResourceGather''',
'''RestoreV2''',
'''SaveV2''',
'''ShardedFilename''',
'''StatefulPartitionedCall''',
'''StaticRegexFullMatch''',
'''VarHandleOp''',
]
def onnx_compliancy ( saved_model_path ,strict ,opset ):
    """simple docstring"""
    saved_model : Tuple = SavedModel()
    onnx_ops : Dict = []
    with open(os.path.join(REPO_PATH ,"utils" ,"tf_ops" ,"onnx.json" ) ) as f:
        onnx_opsets : Dict = json.load(f )["opsets"]
    for i in range(1 ,opset + 1 ):
        onnx_ops.extend(onnx_opsets[str(i )] )
    with open(saved_model_path ,"rb" ) as f:
        saved_model.ParseFromString(f.read() )
    model_op_names : Tuple = set()
    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node )
        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def )
    # Convert to list, sorted if you want
    model_op_names : str = sorted(model_op_names )
    incompatible_ops : Optional[int] = []
    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op )
    if strict and len(incompatible_ops ) > 0:
        raise Exception(F'''Found the following incompatible ops for the opset {opset}:\n''' + "\n".join(incompatible_ops ) )
    elif len(incompatible_ops ) > 0:
        print(F'''Found the following incompatible ops for the opset {opset}:''' )
        print(*incompatible_ops ,sep="\n" )
    else:
        print(F'''The saved model {saved_model_path} can properly be converted with ONNX.''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--saved_model_path''', help='''Path of the saved model to check (the .pb file).''')
parser.add_argument(
'''--opset''', default=12, type=int, help='''The ONNX opset against which the model has to be tested.'''
)
parser.add_argument(
'''--framework''', choices=['''onnx'''], default='''onnx''', help='''Frameworks against which to test the saved model.'''
)
parser.add_argument(
'''--strict''', action='''store_true''', help='''Whether make the checking strict (raise errors) or not (raise warnings)'''
)
    args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
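# --- Editor's addition: example invocation (the model path is hypothetical);
# run from the repository root so the relative "utils/tf_ops/onnx.json" lookup resolves:
#
#   python utils/check_tf_ops.py --saved_model_path /tmp/model/saved_model.pb --opset 12 --strict
#
# The script collects every op across all meta-graphs and library functions of the
# SavedModel protobuf and flags any op missing from the requested ONNX opset table.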
| 656 | 1 |
'''simple docstring'''
import math
import tensorflow as tf
from packaging import version
def _gelu ( __magic_name__ )-> str:
    """simple docstring"""
    x : Any = tf.convert_to_tensor(__magic_name__ )
    cdf : int = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) ,x.dtype ) ))
    return x * cdf
def _gelu_new ( __magic_name__ )-> int:
    """simple docstring"""
    x : List[Any] = tf.convert_to_tensor(__magic_name__ )
    pi : Optional[Any] = tf.cast(math.pi ,x.dtype )
    coeff : int = tf.cast(0.044_715 ,x.dtype )
    cdf : Optional[Any] = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(x ,3 )) ))
    return x * cdf
def mish ( __magic_name__ ):
    """simple docstring"""
    x : List[Any] = tf.convert_to_tensor(__magic_name__ )
    return x * tf.tanh(tf.math.softplus(x ) )
def gelu_fast ( __magic_name__ ):
    """simple docstring"""
    x : Optional[Any] = tf.convert_to_tensor(__magic_name__ )
    coeffa : str = tf.cast(0.044_715 ,x.dtype )
    coeffb : Tuple = tf.cast(0.7_978_845_608 ,x.dtype )
    return 0.5 * x * (1.0 + tf.tanh(x * coeffb * (1.0 + coeffa * x * x) ))
def quick_gelu ( __magic_name__ )-> str:
    """simple docstring"""
    x : List[str] = tf.convert_to_tensor(__magic_name__ )
    coeff : Optional[int] = tf.cast(1.702 ,x.dtype )
    return x * tf.math.sigmoid(coeff * x )
def gelu_aa ( __magic_name__ ):
    """simple docstring"""
    return tf.clip_by_value(_gelu(__magic_name__ ) ,-10 ,10 )
def glu ( __magic_name__ ,axis=-1 ):
    """simple docstring"""
    a, b = tf.split(__magic_name__ ,2 ,axis=axis )
    return a * tf.math.sigmoid(b )
if version.parse(tf.version.VERSION) >= version.parse('''2.4'''):
    def approximate_gelu_wrap ( __magic_name__ ):
        """simple docstring"""
        return tf.keras.activations.gelu(__magic_name__ ,approximate=True )
    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new
ACTaFN = {
'''gelu''': gelu,
'''gelu_10''': gelu_aa,
'''gelu_fast''': gelu_fast,
'''gelu_new''': gelu_new,
'''glu''': glu,
'''mish''': mish,
'''quick_gelu''': quick_gelu,
'''relu''': tf.keras.activations.relu,
'''sigmoid''': tf.keras.activations.sigmoid,
'''silu''': tf.keras.activations.swish,
'''swish''': tf.keras.activations.swish,
'''tanh''': tf.keras.activations.tanh,
}
def get_tf_activation ( activation_string ):
    """simple docstring"""
    if activation_string in ACTaFN:
        return ACTaFN[activation_string]
    else:
        raise KeyError(F'''function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}''' )
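# --- Editor's addition: a self-contained sanity check (not part of the original
# file) that the tanh-based approximation above tracks the exact erf-based GELU;
# the tolerance mentioned in the comment is illustrative.
def _demo_gelu_agreement():
    xs = tf.linspace(-4.0, 4.0, 81)
    max_gap = tf.reduce_max(tf.abs(_gelu(xs) - _gelu_new(xs)))
    return float(max_gap)  # typically on the order of 1e-3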
| 656 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger : Optional[Any] = datasets.utils.logging.get_logger(__name__)
_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS : List[str] = ['''names''', '''prefix''']
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS : List[str] = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols''']
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS : List[str] = ['''encoding_errors''', '''on_bad_lines''']
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS : List[str] = ['''date_format''']
@dataclass
class CsvConfig (datasets.BuilderConfig ):
"""simple docstring"""
a__ = ","
a__ = None
a__ = "infer"
a__ = None
a__ = None
a__ = None
a__ = None
a__ = None
a__ = True
a__ = None
a__ = None
a__ = None
a__ = None
a__ = False
a__ = None
a__ = None
a__ = None
a__ = True
a__ = True
a__ = False
a__ = True
a__ = None
a__ = "."
a__ = None
a__ = '"'
a__ = 0
a__ = None
a__ = None
a__ = None
a__ = None
a__ = True
a__ = True
a__ = 0
a__ = True
a__ = False
a__ = None
a__ = 10000
a__ = None
a__ = "strict"
a__ = "error"
a__ = None
    def __post_init__ ( self ) -> Any:
        '''simple docstring'''
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names
@property
    def pd_read_csv_kwargs ( self ) -> dict:
        '''simple docstring'''
        pd_read_csv_kwargs : Optional[int] = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , pd_read_csv_parameter ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class A_ (datasets.ArrowBasedBuilder ):
"""simple docstring"""
    BUILDER_CONFIG_CLASS = CsvConfig
    def _info ( self ) -> Optional[Any]:
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
    def _split_generators ( self , dl_manager ) -> List[Any]:
        '''simple docstring'''
        if not self.config.data_files:
            raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
        data_files : Optional[Any] = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files , (str, list, tuple) ):
            files : int = data_files
            if isinstance(files , str ):
                files : List[str] = [files]
            files : Tuple = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
        splits : str = []
        for split_name, files in data_files.items():
            if isinstance(files , str ):
                files : str = [files]
            files : Any = [dl_manager.iter_files(file ) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={"files": files} ) )
        return splits
    def _cast_table ( self , pa_table :pa.Table ) -> pa.Table:
        '''simple docstring'''
        if self.config.features is not None:
            schema : int = self.config.features.arrow_schema
            if all(not require_storage_cast(feature ) for feature in self.config.features.values() ):
                # cheaper cast
                pa_table : Optional[Any] = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=schema )
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table : Dict = table_cast(pa_table , schema )
        return pa_table
    def _generate_tables ( self , files ) -> Optional[int]:
        '''simple docstring'''
        schema : Tuple = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype : str = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature ) else object
                for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
            csv_file_reader : Tuple = pd.read_csv(file , iterator=True , dtype=dtype , **self.config.pd_read_csv_kwargs )
            try:
                for batch_idx, df in enumerate(csv_file_reader ):
                    pa_table : Optional[int] = pa.Table.from_pandas(df )
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table )
            except ValueError as e:
                logger.error(F'''Failed to read file \'{file}\' with error {type(e )}: {e}''' )
                raise
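# --- Editor's addition: how this builder is usually reached through the public
# datasets API (a hedged sketch; the file path is hypothetical):
#
#   from datasets import load_dataset
#   dataset = load_dataset("csv", data_files={"train": "train.csv"}, sep=",")
#   print(dataset["train"][0])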
| 656 | 1 |
'''simple docstring'''
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class A_ (unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = JukeboxTokenizer
    metas = {
'''artist''': '''Zac Brown Band''',
'''genres''': '''Country''',
'''lyrics''': '''I met a traveller from an antique land,
Who said "Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
''',
}
@require_torch
def _A ( self :int ) -> Optional[int]:
'''simple docstring'''
import torch
        tokenizer : Optional[int] = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics" )
        tokens : Any = tokenizer(**self.metas )["input_ids"]
# fmt: off
        EXPECTED_OUTPUT : Any = [
torch.tensor([[
0, 0, 0, 7_169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1_069, 11]] ),
torch.tensor([[0, 0, 0, 1_069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
def _A ( self :Any ) -> str:
'''simple docstring'''
import torch
        tokenizer : Tuple = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics" )
        tokens : Any = tokenizer(**self.metas )["input_ids"]
# fmt: off
        EXPECTED_OUTPUT : Optional[Any] = [
torch.tensor([[
0, 0, 0, 1_069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
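# --- Editor's addition: a hedged sketch of the call pattern the tests above use
# (requires downloading the checkpoint; the metadata values are illustrative).
def _demo_jukebox_tokenize():
    tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
    encoded = tokenizer(artist="Zac Brown Band", genres="Country", lyrics="I met a traveller...")
    return [ids.shape for ids in encoded["input_ids"]]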
| 656 |
'''simple docstring'''
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ (a_ , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp ( self ):
        '''simple docstring'''
        super().setUp()
        # fmt: off
        vocab : Dict = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens : List[str] = dict(zip(vocab , range(len(vocab ) ) ) )
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + "\n" )
    def get_tokenizer ( self , **kwargs ):
        '''simple docstring'''
        return MgpstrTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts ( self , tokenizer ):
        '''simple docstring'''
        input_text : Dict = "tester"
        output_text : Tuple = "tester"
        return input_text, output_text
@unittest.skip("MGP-STR always lower cases letters." )
def _A ( self :Dict ) -> str:
'''simple docstring'''
pass
def _A ( self :Tuple ) -> Union[str, Any]:
'''simple docstring'''
        tokenizers : List[str] = self.get_tokenizers(do_lower_case=False )
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                special_token : Tuple = "[SPECIAL_TOKEN]"
                tokenizer.add_special_tokens({"cls_token": special_token} )
                encoded_special_token : str = tokenizer.encode([special_token] , add_special_tokens=False )
                self.assertEqual(len(encoded_special_token ) , 1 )
                decoded : Tuple = tokenizer.decode(encoded_special_token , skip_special_tokens=True )
                self.assertTrue(special_token not in decoded )
def _A ( self :int ) -> List[str]:
'''simple docstring'''
        tokenizers : Dict = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                input_text, output_text = self.get_input_output_texts(tokenizer )
                tokens : Union[str, Any] = tokenizer.tokenize(input_text )
                ids : List[Any] = tokenizer.convert_tokens_to_ids(tokens )
                ids_a : Dict = tokenizer.encode(input_text , add_special_tokens=False )
                self.assertListEqual(ids , ids_a )
                tokens_a : List[str] = tokenizer.convert_ids_to_tokens(ids )
                self.assertNotEqual(len(tokens_a ) , 0 )
                text_a : List[str] = tokenizer.decode(ids )
                self.assertIsInstance(text_a , str )
                self.assertEqual(text_a.replace(" " , "" ) , output_text )
@unittest.skip("MGP-STR tokenizer only handles one sequence." )
def _A ( self :Union[str, Any] ) -> Any:
'''simple docstring'''
pass
@unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" )
def _A ( self :int ) -> Dict:
'''simple docstring'''
pass
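# --- Editor's addition: the character-level vocab round trip the tests rely on,
# reproduced standalone (no tokenizer download needed; the mapping mirrors setUp above).
_DEMO_VOCAB = {tok: idx for idx, tok in enumerate(["[GO]", "[s]"] + list("0123456789abcdefghijklmnopqrstuvwxyz"))}
assert [_DEMO_VOCAB[ch] for ch in "tester"] == [31, 16, 30, 31, 16, 29]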
| 656 | 1 |
'''simple docstring'''
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def get_new_path ( suffix="" )-> str:
    """simple docstring"""
    directory : Any = tempfile.mkdtemp()
    return os.path.join(directory ,str(uuid.uuid4() ) + suffix )
@require_soundfile
@require_torch
class A_ (unittest.TestCase ):
"""simple docstring"""
def _A ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
        tensor : Optional[Any] = torch.rand(12 , dtype=torch.float32 ) - 0.5
        agent_type : Dict = AgentAudio(tensor )
        path : Optional[int] = str(agent_type.to_string() )
        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor , agent_type.to_raw() , atol=1E-4 ) )
        del agent_type
        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path ) )
        # Ensure that the file contains the same value as the original tensor
        new_tensor, sr = sf.read(path )
        self.assertTrue(torch.allclose(tensor , torch.tensor(new_tensor ) , atol=1E-4 ) )
def _A ( self :Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
        tensor : Any = torch.rand(12 , dtype=torch.float32 ) - 0.5
        path : Union[str, Any] = get_new_path(suffix=".wav" )
        sf.write(path , tensor , 16_000 )
        agent_type : Dict = AgentAudio(path )
        self.assertTrue(torch.allclose(tensor , agent_type.to_raw() , atol=1E-4 ) )
        self.assertEqual(agent_type.to_string() , path )
@require_vision
@require_torch
class A_ (unittest.TestCase ):
"""simple docstring"""
def _A ( self :Optional[Any] ) -> int:
'''simple docstring'''
        tensor : str = torch.randint(0 , 256 , (64, 64, 3) )
        agent_type : List[str] = AgentImage(tensor )
        path : List[Any] = str(agent_type.to_string() )
        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor , agent_type._tensor , atol=1E-4 ) )
        self.assertIsInstance(agent_type.to_raw() , Image.Image )
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path ) )
def _A ( self :Union[str, Any] ) -> Tuple:
'''simple docstring'''
        path : Any = Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png"
        image : Union[str, Any] = Image.open(path )
        agent_type : List[str] = AgentImage(path )
        self.assertTrue(path.samefile(agent_type.to_string() ) )
        self.assertTrue(image == agent_type.to_raw() )
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path ) )
def _A ( self :Any ) -> Tuple:
'''simple docstring'''
        path : List[str] = Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png"
        image : List[Any] = Image.open(path )
        agent_type : int = AgentImage(image )
        self.assertFalse(path.samefile(agent_type.to_string() ) )
        self.assertTrue(image == agent_type.to_raw() )
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path ) )
class A_ (unittest.TestCase ):
"""simple docstring"""
def _A ( self :int ) -> Dict:
'''simple docstring'''
snake_case_ : Dict = "Hey!"
snake_case_ : Dict = AgentText(lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , agent_type.to_string() )
self.assertEqual(lowerCAmelCase__ , agent_type.to_raw() )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
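# --- Editor's addition: a hedged sketch of the audio round trip asserted above
# (requires torch and soundfile, the same guards this test module already uses).
def _demo_agent_audio_roundtrip():
    tensor = torch.rand(12, dtype=torch.float32) - 0.5
    audio = AgentAudio(tensor)
    path = str(audio.to_string())  # serializes the tensor to a .wav file on disk
    new_tensor, _sr = sf.read(path)
    return bool(torch.allclose(tensor, torch.tensor(new_tensor).float(), atol=1e-4))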
| 656 |
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean ( input_a ,input_b )-> float:
    """simple docstring"""
    return math.sqrt(sum(pow(a - b ,2 ) for a, b in zip(input_a ,input_b ) ) )
def similarity_search ( dataset ,value_array )-> list[list[list[float] | float]]:
    """simple docstring"""
    if dataset.ndim != value_array.ndim:
        msg : int = (
            "Wrong input data's dimensions... "
            F'''dataset : {dataset.ndim}, value_array : {value_array.ndim}'''
        )
        raise ValueError(msg )
    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg : Dict = (
                "Wrong input data's shape... "
                F'''dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}'''
            )
            raise ValueError(msg )
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape" )
    if dataset.dtype != value_array.dtype:
        msg : Dict = (
            "Input data have different datatype... "
            F'''dataset : {dataset.dtype}, value_array : {value_array.dtype}'''
        )
        raise TypeError(msg )
    answer : Optional[int] = []
    for value in value_array:
        dist : List[str] = euclidean(value ,dataset[0] )
        vector : int = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist : Optional[Any] = euclidean(value ,dataset_value )
            if dist > temp_dist:
                dist : Tuple = temp_dist
                vector : Optional[int] = dataset_value.tolist()
        answer.append([vector, dist] )
    return answer
def cosine_similarity ( input_a ,input_b )-> float:
    """simple docstring"""
    return np.dot(input_a ,input_b ) / (norm(input_a ) * norm(input_b ))
if __name__ == "__main__":
import doctest
doctest.testmod()
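# --- Editor's addition: a tiny worked example of the nearest-neighbour search above
# (values chosen so the expected result is obvious).
def _demo_similarity_search():
    dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
    value_array = np.array([[0.1, 0.1]])
    # nearest vector to (0.1, 0.1) is (0.0, 0.0), at distance ~0.1414
    return similarity_search(dataset, value_array)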
| 656 | 1 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : Any = logging.get_logger(__name__)
__lowerCamelCase : Any = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class A_ (a_ ):
"""simple docstring"""
a__ = '''unispeech'''
    def __init__( self , vocab_size=32 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , feat_quantizer_dropout=0.0 , final_dropout=0.1 , layerdrop=0.1 , initializer_range=0.0_2 , layer_norm_eps=1E-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(512, 512, 512, 512, 512, 512, 512) , conv_stride=(5, 2, 2, 2, 2, 2, 2) , conv_kernel=(10, 3, 3, 3, 3, 2, 2) , conv_bias=False , num_conv_pos_embeddings=128 , num_conv_pos_embedding_groups=16 , do_stable_layer_norm=False , apply_spec_augment=True , mask_time_prob=0.0_5 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , num_codevectors_per_group=320 , num_codevector_groups=2 , contrastive_logits_temperature=0.1 , num_negatives=100 , codevector_dim=256 , proj_codevector_dim=256 , diversity_loss_weight=0.1 , ctc_loss_reduction="mean" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=256 , num_ctc_classes=80 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , replace_prob=0.5 , **kwargs , ):
'''simple docstring'''
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # pretraining loss
        self.replace_prob = replace_prob
@property
def _A ( self :Tuple ) -> str:
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
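# --- Editor's addition: the property above multiplies the feature-extractor
# strides; with the default conv_stride it is 5 * 2**6 == 320, i.e. one encoder
# frame per 320 input samples. A standalone check (functools/operator are
# imported at the top of this file):
if __name__ == "__main__":
    assert functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320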
| 656 |
'''simple docstring'''
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path ( pred_path ,tgt_path ,save_path=None ,**kwargs ):
    """simple docstring"""
    pred_lns : int = [x.strip() for x in open(pred_path ).readlines()]
    tgt_lns : Optional[int] = [x.strip() for x in open(tgt_path ).readlines()][: len(pred_lns )]
    metrics : List[Any] = calculate_rouge(pred_lns ,tgt_lns ,**kwargs )
    if save_path is not None:
        save_json(metrics ,save_path ,indent=None )
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
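# --- Editor's addition: example invocation (file names are hypothetical); any
# extra flags are forwarded to calculate_rouge via **kwargs:
#
#   python calculate_rouge_path.py predictions.txt references.txt --save_path rouge.json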
| 656 | 1 |
'''simple docstring'''
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 656 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', f'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', f'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', f'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', f'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.weight''', f'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', f'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', f'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', f'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.weight''', f'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', f'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', f'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', f'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', f'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', f'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.bias''', f'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', f'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', f'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', f'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.bias''', f'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', f'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''),
('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''),
('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''),
('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''),
('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''),
('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''),
('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''),
('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''),
('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''),
('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''),
]
)
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> int:
"""simple docstring"""
snake_case_ : Optional[Any] = state_dict.pop(__magic_name__ )
snake_case_ : Any = val
def __UpperCAmelCase ( __magic_name__ )-> Optional[Any]:
"""simple docstring"""
snake_case_ : Any = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
snake_case_ : Optional[Any] = key.replace("backbone.0.body" ,"backbone.conv_encoder.model" )
snake_case_ : int = value
else:
snake_case_ : int = value
return new_state_dict
def __UpperCAmelCase ( __magic_name__ ,__magic_name__=False )-> Optional[int]:
"""simple docstring"""
snake_case_ : str = ""
if is_panoptic:
snake_case_ : Dict = "conditional_detr."
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
snake_case_ : Any = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
snake_case_ : Optional[int] = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
snake_case_ : Tuple = in_proj_weight[:256, :]
snake_case_ : List[Any] = in_proj_bias[:256]
snake_case_ : Optional[Any] = in_proj_weight[256:512, :]
snake_case_ : Optional[int] = in_proj_bias[256:512]
snake_case_ : Optional[int] = in_proj_weight[-256:, :]
snake_case_ : str = in_proj_bias[-256:]
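        # NOTE: the 256-row slices above assume hidden_size == 256 (the conditional
        # DETR default). PyTorch's nn.MultiheadAttention stores the query, key and
        # value projections stacked in a single (3 * hidden_size, hidden_size)
        # in_proj_weight (plus a matching bias), unpacked here in q, k, v order.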
def __UpperCAmelCase ( )-> Optional[Any]:
"""simple docstring"""
snake_case_ : Optional[int] = "http://images.cocodataset.org/val2017/000000039769.jpg"
snake_case_ : Optional[Any] = Image.open(requests.get(__magic_name__ ,stream=__magic_name__ ).raw )
return im
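# The URL above is COCO val2017 image 000000039769 (the familiar photo of two
# cats on a couch) that Hugging Face conversion scripts commonly load as a quick
# smoke-test input.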
@torch.no_grad()
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> List[str]:
"""simple docstring"""
snake_case_ : Optional[Any] = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
snake_case_ : Optional[Any] = "resnet101"
if "dc5" in model_name:
snake_case_ : List[str] = True
snake_case_ : Tuple = "panoptic" in model_name
if is_panoptic:
snake_case_ : List[Any] = 250
else:
snake_case_ : Optional[Any] = 91
snake_case_ : Optional[int] = "huggingface/label-files"
snake_case_ : Dict = "coco-detection-id2label.json"
snake_case_ : List[Any] = json.load(open(hf_hub_download(__magic_name__ ,__magic_name__ ,repo_type="dataset" ) ,"r" ) )
snake_case_ : Optional[int] = {int(__magic_name__ ): v for k, v in idalabel.items()}
snake_case_ : int = idalabel
snake_case_ : Dict = {v: k for k, v in idalabel.items()}
# load image processor
snake_case_ : Optional[int] = "coco_panoptic" if is_panoptic else "coco_detection"
snake_case_ : str = ConditionalDetrImageProcessor(format=__magic_name__ )
# prepare image
snake_case_ : str = prepare_img()
snake_case_ : int = image_processor(images=__magic_name__ ,return_tensors="pt" )
snake_case_ : Union[str, Any] = encoding["pixel_values"]
logger.info(F'''Converting model {model_name}...''' )
# load original model from torch hub
snake_case_ : Union[str, Any] = torch.hub.load("DeppMeng/ConditionalDETR" ,__magic_name__ ,pretrained=__magic_name__ ).eval()
snake_case_ : Any = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
snake_case_ : Any = "conditional_detr." + src
rename_key(__magic_name__ ,__magic_name__ ,__magic_name__ )
snake_case_ : Tuple = rename_backbone_keys(__magic_name__ )
# query, key and value matrices need special treatment
read_in_q_k_v(__magic_name__ ,is_panoptic=__magic_name__ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
snake_case_ : int = "conditional_detr.model." if is_panoptic else "model."
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("conditional_detr" )
and not key.startswith("class_labels_classifier" )
and not key.startswith("bbox_predictor" )
):
snake_case_ : Any = state_dict.pop(__magic_name__ )
snake_case_ : Optional[int] = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
snake_case_ : Tuple = state_dict.pop(__magic_name__ )
snake_case_ : Any = val
elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
continue
else:
snake_case_ : Union[str, Any] = state_dict.pop(__magic_name__ )
snake_case_ : List[Any] = val
else:
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
snake_case_ : Any = state_dict.pop(__magic_name__ )
snake_case_ : List[Any] = val
# finally, create HuggingFace model and load state dict
snake_case_ : Optional[int] = ConditionalDetrForSegmentation(__magic_name__ ) if is_panoptic else ConditionalDetrForObjectDetection(__magic_name__ )
model.load_state_dict(__magic_name__ )
model.eval()
model.push_to_hub(repo_id=__magic_name__ ,organization="DepuMeng" ,commit_message="Add model" )
# verify our conversion
snake_case_ : Dict = conditional_detr(__magic_name__ )
snake_case_ : Union[str, Any] = model(__magic_name__ )
assert torch.allclose(outputs.logits ,original_outputs["pred_logits"] ,atol=1E-4 )
assert torch.allclose(outputs.pred_boxes ,original_outputs["pred_boxes"] ,atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks ,original_outputs["pred_masks"] ,atol=1E-4 )
# Save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ )
model.save_pretrained(__magic_name__ )
image_processor.save_pretrained(__magic_name__ )
if __name__ == "__main__":
__lowerCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''conditional_detr_resnet50''',
type=str,
help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
__lowerCamelCase : int = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
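# Example invocation (a sketch: the script filename and the output folder are
# illustrative, not taken from this file):
#
#     python convert_conditional_detr_checkpoint.py \
#         --model_name conditional_detr_resnet50 \
#         --pytorch_dump_folder_path ./conditional_detr_resnet50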
| 656 | 1 |
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
__lowerCamelCase : Union[str, Any] = 0
__lowerCamelCase : List[Any] = [
[0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0], # 0s are free cells whereas 1s are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
__lowerCamelCase : Any = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
__lowerCamelCase : str = tuple[int, int]
class A_ :
"""simple docstring"""
def __init__( self :List[str] , lowerCAmelCase__ :int , lowerCAmelCase__ :int , lowerCAmelCase__ :int , lowerCAmelCase__ :int , lowerCAmelCase__ :int , lowerCAmelCase__ :Node | None , ) -> None:
'''simple docstring'''
snake_case_ : Optional[int] = pos_x
snake_case_ : int = pos_y
snake_case_ : int = (pos_y, pos_x)
snake_case_ : Optional[int] = goal_x
snake_case_ : Optional[Any] = goal_y
snake_case_ : Any = g_cost
snake_case_ : List[Any] = parent
snake_case_ : Union[str, Any] = self.calculate_heuristic()
snake_case_ : Optional[int] = self.g_cost + self.h_cost
def _A ( self :Dict ) -> float:
'''simple docstring'''
snake_case_ : List[str] = self.pos_x - self.goal_x
snake_case_ : List[str] = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(lowerCAmelCase__ ) + abs(lowerCAmelCase__ )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self :Optional[int] , lowerCAmelCase__ :Node ) -> bool:
'''simple docstring'''
return self.f_cost < other.f_cost
class A_ :
"""simple docstring"""
def __init__( self :List[Any] , lowerCAmelCase__ :TPosition , lowerCAmelCase__ :TPosition ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Any = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , lowerCAmelCase__ )
snake_case_ : str = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99_999 , lowerCAmelCase__ )
snake_case_ : List[Any] = [self.start]
snake_case_ : list[Node] = []
snake_case_ : Tuple = False
def _A ( self :Tuple ) -> list[TPosition]:
'''simple docstring'''
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
snake_case_ : str = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(lowerCAmelCase__ )
self.closed_nodes.append(lowerCAmelCase__ )
snake_case_ : List[str] = self.get_successors(lowerCAmelCase__ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(lowerCAmelCase__ )
else:
# retrieve the best current path
snake_case_ : Optional[int] = self.open_nodes.pop(self.open_nodes.index(lowerCAmelCase__ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(lowerCAmelCase__ )
else:
self.open_nodes.append(lowerCAmelCase__ )
return [self.start.pos]
def _A ( self :Optional[Any] , lowerCAmelCase__ :Node ) -> list[Node]:
'''simple docstring'''
snake_case_ : Optional[int] = []
for action in delta:
snake_case_ : Tuple = parent.pos_x + action[1]
snake_case_ : Dict = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowerCAmelCase__ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
lowerCAmelCase__ , lowerCAmelCase__ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , lowerCAmelCase__ , ) )
return successors
def _A ( self :Any , lowerCAmelCase__ :Node | None ) -> list[TPosition]:
'''simple docstring'''
snake_case_ : Optional[int] = node
snake_case_ : Tuple = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
snake_case_ : List[str] = current_node.parent
path.reverse()
return path
class A_ :
"""simple docstring"""
def __init__( self :int , lowerCAmelCase__ :TPosition , lowerCAmelCase__ :TPosition ) -> None:
'''simple docstring'''
snake_case_ : Any = AStar(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : str = AStar(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : Dict = False
def _A ( self :List[Any] ) -> list[TPosition]:
'''simple docstring'''
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
snake_case_ : List[str] = self.fwd_astar.open_nodes.pop(0 )
snake_case_ : Union[str, Any] = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
lowerCAmelCase__ , lowerCAmelCase__ )
self.fwd_astar.closed_nodes.append(lowerCAmelCase__ )
self.bwd_astar.closed_nodes.append(lowerCAmelCase__ )
snake_case_ : str = current_bwd_node
snake_case_ : Dict = current_fwd_node
snake_case_ : Any = {
self.fwd_astar: self.fwd_astar.get_successors(lowerCAmelCase__ ),
self.bwd_astar: self.bwd_astar.get_successors(lowerCAmelCase__ ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(lowerCAmelCase__ )
else:
# retrieve the best current path
snake_case_ : str = astar.open_nodes.pop(
astar.open_nodes.index(lowerCAmelCase__ ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(lowerCAmelCase__ )
else:
astar.open_nodes.append(lowerCAmelCase__ )
return [self.fwd_astar.start.pos]
def _A ( self :int , lowerCAmelCase__ :Node , lowerCAmelCase__ :Node ) -> list[TPosition]:
'''simple docstring'''
snake_case_ : str = self.fwd_astar.retrace_path(lowerCAmelCase__ )
snake_case_ : Any = self.bwd_astar.retrace_path(lowerCAmelCase__ )
bwd_path.pop()
bwd_path.reverse()
snake_case_ : int = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
__lowerCamelCase : Union[str, Any] = (0, 0)
__lowerCamelCase : List[Any] = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
__lowerCamelCase : str = time.time()
__lowerCamelCase : Dict = AStar(init, goal)
__lowerCamelCase : int = a_star.search()
__lowerCamelCase : List[Any] = time.time() - start_time
print(f'''AStar execution time = {end_time:f} seconds''')
__lowerCamelCase : Tuple = time.time()
__lowerCamelCase : List[str] = BidirectionalAStar(init, goal)
__lowerCamelCase : Optional[int] = time.time() - bd_start_time
print(f'''BidirectionalAStar execution time = {bd_end_time:f} seconds''')
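# Note on HEURISTIC above: on this 4-connected grid (see `delta`), the Manhattan
# distance never overestimates the true path cost, so A* stays optimal; the
# Euclidean distance is also admissible here, just less informed.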
| 656 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class A_ (unittest.TestCase ):
"""simple docstring"""
def _A ( self :Any ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _A ( self :List[Any] ) -> List[str]:
'''simple docstring'''
snake_case_ : Any = 1
snake_case_ : Dict = 3
snake_case_ : Union[str, Any] = (32, 32)
snake_case_ : Optional[int] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowerCAmelCase__ )
return image
@property
def _A ( self :Optional[int] ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : List[str] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
return model
@property
def _A ( self :Dict ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : Optional[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def _A ( self :Dict ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : str = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_006 , )
return RobertaSeriesModelWithTransformation(lowerCAmelCase__ )
@property
def _A ( self :Any ) -> str:
'''simple docstring'''
def extract(*lowerCAmelCase__ :Any , **lowerCAmelCase__ :List[str] ):
class A_ :
"""simple docstring"""
def __init__( self :Optional[int] ) -> List[str]:
'''simple docstring'''
snake_case_ : str = torch.ones([0] )
def _A ( self :int , lowerCAmelCase__ :List[Any] ) -> Tuple:
'''simple docstring'''
self.pixel_values.to(lowerCAmelCase__ )
return self
return Out()
return extract
def _A ( self :int ) -> Dict:
'''simple docstring'''
snake_case_ : str = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case_ : str = self.dummy_cond_unet
snake_case_ : Optional[int] = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
snake_case_ : Dict = self.dummy_vae
snake_case_ : Dict = self.dummy_text_encoder
snake_case_ : Optional[int] = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
snake_case_ : str = 77
snake_case_ : Any = self.dummy_image.to(lowerCAmelCase__ )
snake_case_ : Tuple = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
snake_case_ : Optional[Any] = AltDiffusionImgaImgPipeline(
unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , feature_extractor=self.dummy_extractor , )
snake_case_ : Union[str, Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=lowerCAmelCase__ )
snake_case_ : Optional[Any] = alt_pipe.to(lowerCAmelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
snake_case_ : Dict = "A painting of a squirrel eating a burger"
snake_case_ : List[str] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
snake_case_ : Dict = alt_pipe(
[prompt] , generator=lowerCAmelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=lowerCAmelCase__ , )
snake_case_ : Any = output.images
snake_case_ : List[str] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
snake_case_ : Optional[Any] = alt_pipe(
[prompt] , generator=lowerCAmelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=lowerCAmelCase__ , return_dict=lowerCAmelCase__ , )[0]
snake_case_ : Tuple = image[0, -3:, -3:, -1]
snake_case_ : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
snake_case_ : int = np.array([0.4_4_2_7, 0.3_7_3_1, 0.4_2_4_9, 0.4_9_4_1, 0.4_5_4_6, 0.4_1_4_8, 0.4_1_9_3, 0.4_6_6_6, 0.4_4_9_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def _A ( self :int ) -> List[str]:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.dummy_cond_unet
snake_case_ : Union[str, Any] = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
snake_case_ : int = self.dummy_vae
snake_case_ : List[Any] = self.dummy_text_encoder
snake_case_ : int = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
snake_case_ : int = 77
snake_case_ : Dict = self.dummy_image.to(lowerCAmelCase__ )
# put models in fp16
snake_case_ : Optional[Any] = unet.half()
snake_case_ : Tuple = vae.half()
snake_case_ : List[str] = bert.half()
# make sure here that pndm scheduler skips prk
snake_case_ : Optional[int] = AltDiffusionImgaImgPipeline(
unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , feature_extractor=self.dummy_extractor , )
snake_case_ : List[str] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=lowerCAmelCase__ )
snake_case_ : Optional[Any] = alt_pipe.to(lowerCAmelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
snake_case_ : List[Any] = "A painting of a squirrel eating a burger"
snake_case_ : str = torch.manual_seed(0 )
snake_case_ : Any = alt_pipe(
[prompt] , generator=lowerCAmelCase__ , num_inference_steps=2 , output_type="np" , image=lowerCAmelCase__ , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def _A ( self :Optional[int] ) -> Any:
'''simple docstring'''
snake_case_ : Union[str, Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
# resize to resolution that is divisible by 8 but not 16 or 32
snake_case_ : str = init_image.resize((760, 504) )
snake_case_ : Optional[Any] = "BAAI/AltDiffusion"
snake_case_ : int = AltDiffusionImgaImgPipeline.from_pretrained(
lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
snake_case_ : Tuple = "A fantasy landscape, trending on artstation"
snake_case_ : int = torch.manual_seed(0 )
snake_case_ : List[str] = pipe(
prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , strength=0.7_5 , guidance_scale=7.5 , generator=lowerCAmelCase__ , output_type="np" , )
snake_case_ : str = output.images[0]
snake_case_ : List[Any] = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
snake_case_ : Tuple = np.array([0.9_3_5_8, 0.9_3_9_7, 0.9_5_9_9, 0.9_9_0_1, 1.0_0_0_0, 1.0_0_0_0, 0.9_8_8_2, 1.0_0_0_0, 1.0_0_0_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class A_ (unittest.TestCase ):
"""simple docstring"""
def _A ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _A ( self :str ) -> Any:
'''simple docstring'''
snake_case_ : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
snake_case_ : List[Any] = init_image.resize((768, 512) )
snake_case_ : Tuple = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" )
snake_case_ : Any = "BAAI/AltDiffusion"
snake_case_ : List[str] = AltDiffusionImgaImgPipeline.from_pretrained(
lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
snake_case_ : Tuple = "A fantasy landscape, trending on artstation"
snake_case_ : Tuple = torch.manual_seed(0 )
snake_case_ : List[Any] = pipe(
prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , strength=0.7_5 , guidance_scale=7.5 , generator=lowerCAmelCase__ , output_type="np" , )
snake_case_ : Optional[int] = output.images[0]
assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so use a loose max-absolute-error tolerance here
assert np.abs(expected_image - image ).max() < 1E-2
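        # The @slow / @require_torch_gpu tests above are skipped by default; the
        # usual Hugging Face convention is to opt in with RUN_SLOW=1 when invoking
        # pytest (the exact test-file path depends on the repository layout).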
| 656 | 1 |
'''simple docstring'''
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ )-> str:
"""simple docstring"""
snake_case_ : Optional[int] = BigBirdConfig.from_json_file(__magic_name__ )
print(F'''Building PyTorch model from configuration: {config}''' )
if is_trivia_qa:
snake_case_ : str = BigBirdForQuestionAnswering(__magic_name__ )
else:
snake_case_ : List[Any] = BigBirdForPreTraining(__magic_name__ )
# Load weights from tf checkpoint
load_tf_weights_in_big_bird(__magic_name__ ,__magic_name__ ,is_trivia_qa=__magic_name__ )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(__magic_name__ )
if __name__ == "__main__":
__lowerCamelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--big_bird_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_trivia_qa''', action='''store_true''', help='''Whether to convert a model with a trivia_qa head.'''
)
__lowerCamelCase : Dict = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
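# Example invocation (a sketch: the script filename and all paths below are
# illustrative):
#
#     python convert_bigbird_tf_checkpoint.py \
#         --tf_checkpoint_path ./bigbird/model.ckpt \
#         --big_bird_config_file ./bigbird/config.json \
#         --pytorch_dump_path ./bigbird-pytorch \
#         --is_trivia_qa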
| 656 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
__lowerCamelCase : List[str] = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''}
@is_pipeline_test
class A_ (unittest.TestCase ):
"""simple docstring"""
a__ = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
a__ = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
a__ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
a__ = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def _A ( self :Tuple , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Dict ) -> Any:
'''simple docstring'''
snake_case_ : Optional[Any] = ZeroShotClassificationPipeline(
            model=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , candidate_labels=["politics", "health"] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def _A ( self :List[str] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Any ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Tuple = classifier("Who are you voting for in 2020?" , candidate_labels="politics" )
self.assertEqual(lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ )]} )
# No kwarg
snake_case_ : List[Any] = classifier("Who are you voting for in 2020?" , ["politics"] )
self.assertEqual(lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ )]} )
snake_case_ : Dict = classifier("Who are you voting for in 2020?" , candidate_labels=["politics"] )
self.assertEqual(lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ )]} )
snake_case_ : int = classifier("Who are you voting for in 2020?" , candidate_labels="politics, public health" )
self.assertEqual(
lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ) , 1.0 )
snake_case_ : Optional[int] = classifier("Who are you voting for in 2020?" , candidate_labels=["politics", "public health"] )
self.assertEqual(
lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ) , 1.0 )
snake_case_ : str = classifier(
"Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template="This text is about {}" )
self.assertEqual(lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ )]} )
# https://github.com/huggingface/transformers/issues/13846
snake_case_ : Dict = classifier(["I am happy"] , ["positive", "negative"] )
self.assertEqual(
lowerCAmelCase__ , [
{"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )]}
for i in range(1 )
] , )
snake_case_ : Tuple = classifier(["I am happy", "I am sad"] , ["positive", "negative"] )
self.assertEqual(
lowerCAmelCase__ , [
{"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )]}
for i in range(2 )
] , )
with self.assertRaises(lowerCAmelCase__ ):
classifier("" , candidate_labels="politics" )
with self.assertRaises(lowerCAmelCase__ ):
classifier(lowerCAmelCase__ , candidate_labels="politics" )
with self.assertRaises(lowerCAmelCase__ ):
classifier("Who are you voting for in 2020?" , candidate_labels="" )
with self.assertRaises(lowerCAmelCase__ ):
classifier("Who are you voting for in 2020?" , candidate_labels=lowerCAmelCase__ )
with self.assertRaises(lowerCAmelCase__ ):
classifier(
"Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template="Not formatting template" , )
with self.assertRaises(lowerCAmelCase__ ):
classifier(
"Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template=lowerCAmelCase__ , )
self.run_entailment_id(lowerCAmelCase__ )
def _A ( self :List[Any] , lowerCAmelCase__ :Pipeline ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : int = zero_shot_classifier.model.config
snake_case_ : Optional[int] = config.labelaid
snake_case_ : Tuple = zero_shot_classifier.entailment_id
snake_case_ : Optional[Any] = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
self.assertEqual(zero_shot_classifier.entailment_id , -1 )
snake_case_ : Tuple = {"entailment": 0, "neutral": 1, "contradiction": 2}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
snake_case_ : str = {"ENTAIL": 0, "NON-ENTAIL": 1}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
snake_case_ : str = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
self.assertEqual(zero_shot_classifier.entailment_id , 2 )
snake_case_ : List[str] = original_labelaid
self.assertEqual(lowerCAmelCase__ , zero_shot_classifier.entailment_id )
@require_torch
def _A ( self :Tuple ) -> Any:
'''simple docstring'''
snake_case_ : List[Any] = pipeline(
"zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
"Who are you voting for in 2020?" * 100 , candidate_labels=["politics", "public health", "science"] )
@require_torch
def _A ( self :Optional[Any] ) -> Tuple:
'''simple docstring'''
snake_case_ : Union[str, Any] = pipeline(
"zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , )
snake_case_ : int = zero_shot_classifier(
"Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.3_3_3, 0.3_3_3, 0.3_3_3],
} , )
@require_tf
def _A ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
snake_case_ : List[str] = pipeline(
"zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="tf" , )
snake_case_ : Optional[int] = zero_shot_classifier(
"Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.3_3_3, 0.3_3_3, 0.3_3_3],
} , )
@slow
@require_torch
def _A ( self :Union[str, Any] ) -> int:
'''simple docstring'''
snake_case_ : int = pipeline("zero-shot-classification" , model="roberta-large-mnli" , framework="pt" )
snake_case_ : str = zero_shot_classifier(
"Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.9_7_6, 0.0_1_5, 0.0_0_9],
} , )
snake_case_ : Optional[int] = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=lowerCAmelCase__ , )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
@slow
@require_tf
def _A ( self :List[str] ) -> str:
'''simple docstring'''
snake_case_ : int = pipeline("zero-shot-classification" , model="roberta-large-mnli" , framework="tf" )
snake_case_ : Optional[Any] = zero_shot_classifier(
"Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.9_7_6, 0.0_1_5, 0.0_0_9],
} , )
snake_case_ : Tuple = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=lowerCAmelCase__ , )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
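# Note: the `entailment_id` behaviour exercised by run_entailment_id above comes
# from the model config's label2id mapping: a label whose lower-cased name starts
# with "entail" supplies the index, and -1 (the last logit) is the fallback when
# no such label exists, which is what the LABEL_0/LABEL_1/LABEL_2 case checks.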
| 656 | 1 |
'''simple docstring'''
def __UpperCAmelCase ( __magic_name__ = 200_0000 )-> int:
"""simple docstring"""
snake_case_ : int = [0 for i in range(n + 1 )]
snake_case_ : List[Any] = 1
snake_case_ : Optional[Any] = 1
for i in range(2 ,int(n**0.5 ) + 1 ):
if primality_list[i] == 0:
for j in range(i * i ,n + 1 ,__magic_name__ ):
snake_case_ : List[str] = 1
snake_case_ : List[Any] = 0
for i in range(__magic_name__ ):
if primality_list[i] == 0:
sum_of_primes += i
return sum_of_primes
if __name__ == "__main__":
print(f'''{solution() = }''')
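# A minimal, self-contained sketch of the sieve-and-sum idea used above; the name
# and structure below are illustrative, not taken from the original solution.
def _sieve_sum_demo(limit: int = 10) -> int:
    """Sum all primes up to and including `limit` with a Sieve of Eratosthenes."""
    is_composite = [False] * (limit + 1)
    total = 0
    for candidate in range(2, limit + 1):
        if not is_composite[candidate]:
            total += candidate  # candidate survived the sieve, so it is prime
            for multiple in range(candidate * candidate, limit + 1, candidate):
                is_composite[multiple] = True
    return total  # _sieve_sum_demo(10) == 17 == 2 + 3 + 5 + 7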
| 656 |
'''simple docstring'''
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''1.0.0a'''):
raise Exception('''requires fairseq >= 1.0.0a''')
logging.set_verbosity_info()
__lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
__lowerCamelCase : Union[str, Any] = '''Hello world! cécé herlolip'''
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> Optional[Any]:
"""simple docstring"""
snake_case_ : str = FairseqRobertaModel.from_pretrained(__magic_name__ )
roberta.eval() # disable dropout
snake_case_ : Dict = roberta.model.encoder.sentence_encoder
snake_case_ : List[str] = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings ,hidden_size=roberta.cfg.model.encoder_embed_dim ,num_hidden_layers=roberta.cfg.model.encoder_layers ,num_attention_heads=roberta.cfg.model.encoder_attention_heads ,intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim ,max_position_embeddings=514 ,type_vocab_size=1 ,layer_norm_eps=1E-5 ,)
if classification_head:
snake_case_ : List[str] = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]
print("Our RoBERTa config:" ,__magic_name__ )
snake_case_ : List[str] = XLMRobertaXLForSequenceClassification(__magic_name__ ) if classification_head else XLMRobertaXLForMaskedLM(__magic_name__ )
model.eval()
# Now let's copy all the weights.
# Embeddings
snake_case_ : List[Any] = roberta_sent_encoder.embed_tokens.weight
snake_case_ : int = roberta_sent_encoder.embed_positions.weight
snake_case_ : Union[str, Any] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
snake_case_ : Union[str, Any] = roberta_sent_encoder.layer_norm.weight
snake_case_ : str = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
snake_case_ : BertLayer = model.roberta.encoder.layer[i]
snake_case_ : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
snake_case_ : RobertaAttention = layer.attention
snake_case_ : Dict = roberta_layer.self_attn_layer_norm.weight
snake_case_ : Dict = roberta_layer.self_attn_layer_norm.bias
# self attention
snake_case_ : BertSelfAttention = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
snake_case_ : Dict = roberta_layer.self_attn.q_proj.weight
snake_case_ : Any = roberta_layer.self_attn.q_proj.bias
snake_case_ : Optional[Any] = roberta_layer.self_attn.k_proj.weight
snake_case_ : Optional[Any] = roberta_layer.self_attn.k_proj.bias
snake_case_ : Optional[int] = roberta_layer.self_attn.v_proj.weight
snake_case_ : Any = roberta_layer.self_attn.v_proj.bias
# self-attention output
snake_case_ : BertSelfOutput = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
snake_case_ : List[str] = roberta_layer.self_attn.out_proj.weight
snake_case_ : Optional[int] = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
snake_case_ : int = roberta_layer.final_layer_norm.weight
snake_case_ : Union[str, Any] = roberta_layer.final_layer_norm.bias
# intermediate
snake_case_ : BertIntermediate = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
snake_case_ : List[str] = roberta_layer.fca.weight
snake_case_ : List[Any] = roberta_layer.fca.bias
# output
snake_case_ : BertOutput = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
snake_case_ : Any = roberta_layer.fca.weight
snake_case_ : Any = roberta_layer.fca.bias
# end of layer
if classification_head:
snake_case_ : int = roberta.model.classification_heads["mnli"].dense.weight
snake_case_ : Union[str, Any] = roberta.model.classification_heads["mnli"].dense.bias
snake_case_ : Tuple = roberta.model.classification_heads["mnli"].out_proj.weight
snake_case_ : str = roberta.model.classification_heads["mnli"].out_proj.bias
else:
# LM Head
snake_case_ : Optional[Any] = roberta.model.encoder.lm_head.dense.weight
snake_case_ : int = roberta.model.encoder.lm_head.dense.bias
snake_case_ : Optional[Any] = roberta.model.encoder.lm_head.layer_norm.weight
snake_case_ : Optional[int] = roberta.model.encoder.lm_head.layer_norm.bias
snake_case_ : int = roberta.model.encoder.lm_head.weight
snake_case_ : List[str] = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
snake_case_ : torch.Tensor = roberta.encode(__magic_name__ ).unsqueeze(0 ) # batch of size 1
snake_case_ : Union[str, Any] = model(__magic_name__ )[0]
if classification_head:
snake_case_ : Optional[Any] = roberta.model.classification_heads["mnli"](roberta.extract_features(__magic_name__ ) )
else:
snake_case_ : List[str] = roberta.model(__magic_name__ )[0]
print(our_output.shape ,their_output.shape )
snake_case_ : str = torch.max(torch.abs(our_output - their_output ) ).item()
print(F'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
snake_case_ : Any = torch.allclose(__magic_name__ ,__magic_name__ ,atol=1E-3 )
print("Do both models output the same tensors?" ,"🔥" if success else "💩" )
if not success:
raise Exception("Something went wRoNg" )
pathlib.Path(__magic_name__ ).mkdir(parents=__magic_name__ ,exist_ok=__magic_name__ )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(__magic_name__ )
if __name__ == "__main__":
__lowerCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--roberta_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
__lowerCamelCase : Tuple = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
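# Example invocation (a sketch: the script filename and paths are illustrative):
#
#     python convert_xlm_roberta_xl_checkpoint.py \
#         --roberta_checkpoint_path ./xlmr.xl \
#         --pytorch_dump_folder_path ./xlm-roberta-xl \
#         --classification_head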
| 656 | 1 |
'''simple docstring'''
from __future__ import annotations
__lowerCamelCase : List[Any] = [True] * 1000001
__lowerCamelCase : List[Any] = 2
while i * i <= 1000000:
if seive[i]:
for j in range(i * i, 1000001, i):
__lowerCamelCase : Tuple = False
i += 1
def __UpperCAmelCase ( __magic_name__ )-> bool:
"""simple docstring"""
return seive[n]
def __UpperCAmelCase ( __magic_name__ )-> bool:
"""simple docstring"""
return any(digit in "02468" for digit in str(__magic_name__ ) )
def __UpperCAmelCase ( __magic_name__ = 100_0000 )-> list[int]:
"""simple docstring"""
snake_case_ : Optional[int] = [2] # result already includes the number 2.
for num in range(3 ,limit + 1 ,2 ):
if is_prime(__magic_name__ ) and not contains_an_even_digit(__magic_name__ ):
snake_case_ : List[str] = str(__magic_name__ )
snake_case_ : Optional[int] = [int(str_num[j:] + str_num[:j] ) for j in range(len(__magic_name__ ) )]
if all(is_prime(__magic_name__ ) for i in list_nums ):
result.append(__magic_name__ )
return result
def __UpperCAmelCase ( )-> int:
"""simple docstring"""
return len(find_circular_primes() )
if __name__ == "__main__":
print(f'''{len(find_circular_primes()) = }''')
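# For reference (Project Euler 35): there are thirteen circular primes below 100,
# namely 2, 3, 5, 7, 11, 13, 17, 31, 37, 71, 73, 79 and 97, which makes a handy
# sanity check for find_circular_primes(100).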
| 656 |
'''simple docstring'''
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def __UpperCAmelCase ( __magic_name__ ,__magic_name__=() ,__magic_name__=None ,__magic_name__="no" ,__magic_name__="29500" )-> Optional[int]:
"""simple docstring"""
snake_case_ : str = False
snake_case_ : int = False
if any(key.startswith("KAGGLE" ) for key in os.environ.keys() ):
snake_case_ : Any = True
elif "IPython" in sys.modules:
snake_case_ : Union[str, Any] = "google.colab" in str(sys.modules["IPython"].get_ipython() )
try:
snake_case_ : Any = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
            F'''Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}.''' )
if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME" ,__magic_name__ ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
"your training function. Restart your notebook and make sure no cells initializes an "
"`Accelerator`." )
if num_processes is None:
snake_case_ : Tuple = 8
snake_case_ : Optional[int] = PrepareForLaunch(__magic_name__ ,distributed_type="TPU" )
print(F'''Launching a training on {num_processes} TPU cores.''' )
xmp.spawn(__magic_name__ ,args=__magic_name__ ,nprocs=__magic_name__ ,start_method="fork" )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print("Launching training on one GPU." )
else:
print("Launching training on one CPU." )
function(*__magic_name__ )
else:
if num_processes is None:
raise ValueError(
"You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call." )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
"inside your training function. Restart your notebook and make sure no cells initializes an "
"`Accelerator`." )
if torch.cuda.is_initialized():
raise ValueError(
"To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
"using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
"function." )
        # torch.distributed will expect a few environment variables to be here. We set the ones common to each
        # process here (the other ones will be set by the launcher).
with patch_environment(
            world_size=__magic_name__ ,master_addr="127.0.0.1" ,master_port=__magic_name__ ,mixed_precision=__magic_name__ ):
snake_case_ : Optional[int] = PrepareForLaunch(__magic_name__ ,distributed_type="MULTI_GPU" )
print(F'''Launching training on {num_processes} GPUs.''' )
try:
start_processes(__magic_name__ ,args=__magic_name__ ,nprocs=__magic_name__ ,start_method="fork" )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
"CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
"This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
"Please review your imports and test them when running the `notebook_launcher()` to identify "
"which one is problematic." ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
snake_case_ : Any = "1"
print("Launching training on MPS." )
elif torch.cuda.is_available():
print("Launching training on one GPU." )
else:
print("Launching training on CPU." )
function(*__magic_name__ )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__=() ,__magic_name__=2 )-> Dict:
"""simple docstring"""
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variables to be here. We set the ones common to each
        # process here (the other ones will be set by the launcher).
with patch_environment(
            world_size=__magic_name__ ,master_addr="127.0.0.1" ,master_port="29500" ,accelerate_mixed_precision="no" ,accelerate_debug_rdv_file=tmp_file.name ,accelerate_use_cpu="yes" ,):
snake_case_ : Any = PrepareForLaunch(__magic_name__ ,debug=__magic_name__ )
start_processes(__magic_name__ ,args=__magic_name__ ,nprocs=__magic_name__ ,start_method="fork" )
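# Typical usage from a notebook (a sketch: `training_loop` and its arguments are
# hypothetical user code; the public entry point in `accelerate` is called
# `notebook_launcher`):
#
#     from accelerate import notebook_launcher
#     notebook_launcher(training_loop, args=(model, dataloader), num_processes=2)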
| 656 | 1 |
'''simple docstring'''
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
__lowerCamelCase : Union[str, Any] = '''sshleifer/bart-tiny-random'''
__lowerCamelCase : Optional[int] = '''patrickvonplaten/t5-tiny-random'''
@require_torch
class A_ (unittest.TestCase ):
"""simple docstring"""
@cached_property
def _A ( self :Tuple ) -> Optional[Any]:
'''simple docstring'''
return AutoConfig.from_pretrained(lowerCAmelCase__ )
def _A ( self :List[Any] ) -> Any:
'''simple docstring'''
snake_case_, *snake_case_ : int = create_student_by_copying_alternating_layers(lowerCAmelCase__ , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def _A ( self :int ) -> Dict:
'''simple docstring'''
snake_case_, *snake_case_ : int = create_student_by_copying_alternating_layers(lowerCAmelCase__ , tempfile.mkdtemp() , e=1 , d=lowerCAmelCase__ )
def _A ( self :Tuple ) -> Union[str, Any]:
'''simple docstring'''
snake_case_, *snake_case_ : Tuple = create_student_by_copying_alternating_layers(lowerCAmelCase__ , tempfile.mkdtemp() , e=1 , d=lowerCAmelCase__ )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def _A ( self :Dict ) -> str:
'''simple docstring'''
snake_case_, *snake_case_ : Dict = create_student_by_copying_alternating_layers(lowerCAmelCase__ , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def _A ( self :Any ) -> Tuple:
'''simple docstring'''
with self.assertRaises(lowerCAmelCase__ ):
create_student_by_copying_alternating_layers(lowerCAmelCase__ , tempfile.mkdtemp() , e=lowerCAmelCase__ , d=lowerCAmelCase__ )
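# Note: taken together, the assertions above pin down the contract of
# create_student_by_copying_alternating_layers: `e` and `d` choose how many
# encoder / decoder layers the student keeps, and leaving one side unset makes
# the student inherit the teacher's layer count for that side.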
| 656 |
'''simple docstring'''
from collections import deque
from math import floor
from random import random
from time import time
class A_ :
"""simple docstring"""
def __init__( self :Dict ) -> List[str]:
'''simple docstring'''
snake_case_ : int = {}
def _A ( self :Any , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Optional[Any]=1 ) -> Any:
'''simple docstring'''
if self.graph.get(lowerCAmelCase__ ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
snake_case_ : Optional[int] = [[w, v]]
if not self.graph.get(lowerCAmelCase__ ):
snake_case_ : Dict = []
def _A ( self :List[Any] ) -> Optional[int]:
'''simple docstring'''
return list(self.graph )
def _A ( self :str , lowerCAmelCase__ :Any , lowerCAmelCase__ :int ) -> List[Any]:
'''simple docstring'''
if self.graph.get(lowerCAmelCase__ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowerCAmelCase__ )
def _A ( self :List[str] , lowerCAmelCase__ :Optional[Any]=-2 , lowerCAmelCase__ :str=-1 ) -> str:
'''simple docstring'''
if s == d:
return []
snake_case_ : str = []
snake_case_ : Optional[int] = []
if s == -2:
snake_case_ : List[Any] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Dict = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : str = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowerCAmelCase__ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Union[str, Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Optional[Any] = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return visited
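    # The method above is an iterative depth-first search: it keeps an explicit
    # stack, pops back up when every child of the current node has been visited,
    # and returns the visited order as soon as the target `d` is reached.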
def _A ( self :Tuple , lowerCAmelCase__ :int=-1 ) -> int:
'''simple docstring'''
if c == -1:
snake_case_ : Any = floor(random() * 10_000 ) + 10
for i in range(lowerCAmelCase__ ):
            # every vertex gets a random number of outgoing edges (at most 102)
for _ in range(floor(random() * 102 ) + 1 ):
snake_case_ : Optional[Any] = floor(random() * c ) + 1
if n != i:
self.add_pair(lowerCAmelCase__ , lowerCAmelCase__ , 1 )
def _A ( self :Tuple , lowerCAmelCase__ :Dict=-2 ) -> Dict:
'''simple docstring'''
snake_case_ : Union[str, Any] = deque()
snake_case_ : Optional[Any] = []
if s == -2:
snake_case_ : Tuple = list(self.graph )[0]
d.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
while d:
snake_case_ : Optional[int] = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _A ( self :List[str] , lowerCAmelCase__ :str ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Tuple = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def _A ( self :Any , lowerCAmelCase__ :int ) -> Optional[Any]:
'''simple docstring'''
return len(self.graph[u] )
def _A ( self :Tuple , lowerCAmelCase__ :List[str]=-2 ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : str = []
snake_case_ : str = []
if s == -2:
snake_case_ : Optional[Any] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : int = s
snake_case_ : Optional[int] = []
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : List[Any] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : List[str] = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(lowerCAmelCase__ ) != 0:
snake_case_ : int = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Union[str, Any] = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return sorted_nodes
def _A ( self :Dict ) -> Any:
'''simple docstring'''
snake_case_ : Dict = []
snake_case_ : Any = []
snake_case_ : str = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Optional[int] = -2
snake_case_ : Any = []
snake_case_ : List[Any] = s
snake_case_ : int = False
snake_case_ : Optional[int] = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : List[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case_ : Any = len(lowerCAmelCase__ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : Optional[int] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ : Optional[Any] = True
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Optional[Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : str = False
indirect_parents.append(lowerCAmelCase__ )
snake_case_ : List[str] = s
snake_case_ : Optional[int] = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return list(lowerCAmelCase__ )
def _A ( self :Tuple ) -> List[str]:
'''simple docstring'''
snake_case_ : List[Any] = []
snake_case_ : Tuple = []
snake_case_ : List[str] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : str = -2
snake_case_ : List[str] = []
snake_case_ : List[Any] = s
snake_case_ : List[str] = False
snake_case_ : Dict = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : List[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case_ : Any = len(lowerCAmelCase__ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ : Tuple = True
if len(lowerCAmelCase__ ) != 0:
snake_case_ : List[Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Optional[int] = False
indirect_parents.append(lowerCAmelCase__ )
snake_case_ : int = s
snake_case_ : Union[str, Any] = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return False
def _A ( self :Optional[int] , lowerCAmelCase__ :Optional[int]=-2 , lowerCAmelCase__ :Tuple=-1 ) -> str:
'''simple docstring'''
snake_case_ : Optional[int] = time()
self.dfs(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : Optional[Any] = time()
return end - begin
def _A ( self :Any , lowerCAmelCase__ :Tuple=-2 ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Any = time()
self.bfs(lowerCAmelCase__ )
snake_case_ : Any = time()
return end - begin
class A_ :
"""simple docstring"""
def __init__( self :Tuple ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[Any] = {}
def _A ( self :str , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Union[str, Any]=1 ) -> str:
'''simple docstring'''
if self.graph.get(lowerCAmelCase__ ):
            # if there already is an edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
snake_case_ : str = [[w, v]]
# add the other way
if self.graph.get(lowerCAmelCase__ ):
            # if there already is an edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
            # if v does not exist
snake_case_ : List[str] = [[w, u]]
def _A ( self :Dict , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
if self.graph.get(lowerCAmelCase__ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowerCAmelCase__ )
# the other way round
if self.graph.get(lowerCAmelCase__ ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(lowerCAmelCase__ )
def _A ( self :Optional[Any] , lowerCAmelCase__ :Optional[Any]=-2 , lowerCAmelCase__ :Optional[int]=-1 ) -> int:
'''simple docstring'''
if s == d:
return []
snake_case_ : Any = []
snake_case_ : Dict = []
if s == -2:
snake_case_ : Optional[int] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Tuple = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : List[str] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowerCAmelCase__ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Optional[Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : str = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return visited
def _A ( self :Optional[int] , lowerCAmelCase__ :str=-1 ) -> List[Any]:
'''simple docstring'''
if c == -1:
snake_case_ : Optional[int] = floor(random() * 10_000 ) + 10
for i in range(lowerCAmelCase__ ):
            # every vertex has at most 102 edges
for _ in range(floor(random() * 102 ) + 1 ):
snake_case_ : str = floor(random() * c ) + 1
if n != i:
self.add_pair(lowerCAmelCase__ , lowerCAmelCase__ , 1 )
def _A ( self :Any , lowerCAmelCase__ :Optional[Any]=-2 ) -> List[Any]:
'''simple docstring'''
snake_case_ : List[str] = deque()
snake_case_ : Optional[Any] = []
if s == -2:
snake_case_ : List[Any] = list(self.graph )[0]
d.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
while d:
snake_case_ : Optional[int] = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _A ( self :str , lowerCAmelCase__ :Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
return len(self.graph[u] )
def _A ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
snake_case_ : Any = []
snake_case_ : Optional[Any] = []
snake_case_ : Optional[Any] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Tuple = -2
snake_case_ : Optional[int] = []
snake_case_ : Tuple = s
snake_case_ : Optional[Any] = False
snake_case_ : Optional[int] = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : Optional[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case_ : Optional[int] = len(lowerCAmelCase__ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : Tuple = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ : Optional[int] = True
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Optional[Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Optional[int] = False
indirect_parents.append(lowerCAmelCase__ )
snake_case_ : List[Any] = s
snake_case_ : Dict = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return list(lowerCAmelCase__ )
def _A ( self :Optional[Any] ) -> Tuple:
'''simple docstring'''
snake_case_ : Optional[Any] = []
snake_case_ : int = []
snake_case_ : List[str] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Tuple = -2
snake_case_ : int = []
snake_case_ : int = s
snake_case_ : Optional[Any] = False
snake_case_ : List[Any] = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : Union[str, Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case_ : Tuple = len(lowerCAmelCase__ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : Optional[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ : Optional[Any] = True
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Tuple = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Optional[int] = False
indirect_parents.append(lowerCAmelCase__ )
snake_case_ : Union[str, Any] = s
snake_case_ : Tuple = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return False
def _A ( self :Any ) -> Tuple:
'''simple docstring'''
return list(self.graph )
def _A ( self :Optional[Any] , lowerCAmelCase__ :Tuple=-2 , lowerCAmelCase__ :Optional[int]=-1 ) -> str:
'''simple docstring'''
snake_case_ : List[str] = time()
self.dfs(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : List[Any] = time()
return end - begin
def _A ( self :Union[str, Any] , lowerCAmelCase__ :List[Any]=-2 ) -> int:
'''simple docstring'''
snake_case_ : List[str] = time()
self.bfs(lowerCAmelCase__ )
snake_case_ : Tuple = time()
return end - begin
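# Hedged usage sketch: both classes above (a directed and an undirected graph,
# obfuscated to `A_` with every method renamed `_A`) store edges as adjacency
# lists of [weight, vertex] pairs. Their iterative DFS boils down to the
# pattern below (names are illustrative, not from the original source):
def _dfs_sketch(graph: dict, start) -> list:
    stack, visited = [start], [start]
    while stack:
        node = stack[-1]
        # pick the first neighbour not yet visited, otherwise backtrack
        unvisited = [v for _, v in graph.get(node, []) if v not in visited]
        if unvisited:
            stack.append(unvisited[0])
            visited.append(unvisited[0])
        else:
            stack.pop()
    return visited
# _dfs_sketch({1: [[1, 2]], 2: [[1, 3]], 3: []}, 1) returns [1, 2, 3]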
| 656 | 1 |
'''simple docstring'''
import math
import random
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ = False )-> float:
"""simple docstring"""
if deriv:
return value * (1 - value)
return 1 / (1 + math.exp(-value ))
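# Minimal reference sketch of the same logistic function (assumed equivalent
# to the obfuscated helper above). When deriv=True the argument is already an
# activation value, not a raw input:
def _sigmoid_sketch(value: float, deriv: bool = False) -> float:
    if deriv:
        return value * (1 - value)  # slope expressed via the output value
    return 1 / (1 + math.exp(-value))
# Worked check: _sigmoid_sketch(0.0) == 0.5, _sigmoid_sketch(0.5, deriv=True) == 0.25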
# Initial Value
__lowerCamelCase : List[str] = 0.02
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> float:
"""simple docstring"""
snake_case_ : Union[str, Any] = float(2 * (random.randint(1 ,100 )) - 1 )
for _ in range(__magic_name__ ):
# Forward propagation
snake_case_ : Optional[int] = sigmoid_function(INITIAL_VALUE * weight )
# How much did we miss?
snake_case_ : int = (expected / 100) - layer_a
# Error delta
snake_case_ : Any = layer_1_error * sigmoid_function(__magic_name__ ,__magic_name__ )
# Update weight
weight += INITIAL_VALUE * layer_1_delta
return layer_a * 100
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCamelCase : str = int(input('''Expected value: '''))
__lowerCamelCase : Union[str, Any] = int(input('''Number of propagations: '''))
print(forward_propagation(expected, number_propagations))
| 656 |
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
__lowerCamelCase : List[str] = re.compile(R'''\s+''')
def __UpperCAmelCase ( __magic_name__ )-> Union[str, Any]:
"""simple docstring"""
return {"hash": hashlib.mda(re.sub(__magic_name__ ,"" ,example["content"] ).encode("utf-8" ) ).hexdigest()}
def __UpperCAmelCase ( __magic_name__ )-> str:
"""simple docstring"""
snake_case_ : Optional[Any] = [len(__magic_name__ ) for line in example["content"].splitlines()]
return {"line_mean": np.mean(__magic_name__ ), "line_max": max(__magic_name__ )}
def __UpperCAmelCase ( __magic_name__ )-> int:
"""simple docstring"""
snake_case_ : Optional[int] = np.mean([c.isalnum() for c in example["content"]] )
return {"alpha_frac": alpha_frac}
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> Tuple:
"""simple docstring"""
if example["hash"] in uniques:
uniques.remove(example["hash"] )
return True
else:
return False
def __UpperCAmelCase ( __magic_name__ ,__magic_name__=5 )-> Tuple:
"""simple docstring"""
snake_case_ : List[str] = ["auto-generated", "autogenerated", "automatically generated"]
snake_case_ : Optional[Any] = example["content"].splitlines()
for _, line in zip(range(__magic_name__ ) ,__magic_name__ ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def __UpperCAmelCase ( __magic_name__ ,__magic_name__=5 ,__magic_name__=0.05 )-> Optional[Any]:
"""simple docstring"""
snake_case_ : str = ["unit tests", "test file", "configuration file"]
snake_case_ : int = example["content"].splitlines()
snake_case_ : Optional[Any] = 0
snake_case_ : Any = 0
# first test
for _, line in zip(range(__magic_name__ ) ,__magic_name__ ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
snake_case_ : Tuple = example["content"].count("\n" )
snake_case_ : int = int(coeff * nlines )
for line in lines:
count_config += line.lower().count("config" )
count_test += line.lower().count("test" )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def __UpperCAmelCase ( __magic_name__ )-> str:
"""simple docstring"""
snake_case_ : List[Any] = ["def ", "class ", "for ", "while "]
snake_case_ : Optional[Any] = example["content"].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def __UpperCAmelCase ( __magic_name__ ,__magic_name__=4 )-> Optional[int]:
"""simple docstring"""
snake_case_ : Tuple = example["content"].splitlines()
snake_case_ : Tuple = 0
for line in lines:
counter += line.lower().count("=" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def __UpperCAmelCase ( __magic_name__ )-> List[Any]:
"""simple docstring"""
snake_case_ : Tuple = tokenizer(example["content"] ,truncation=__magic_name__ )["input_ids"]
snake_case_ : int = len(example["content"] ) / len(__magic_name__ )
return {"ratio": ratio}
def __UpperCAmelCase ( __magic_name__ )-> Optional[Any]:
"""simple docstring"""
snake_case_ : Union[str, Any] = {}
results.update(get_hash(__magic_name__ ) )
results.update(line_stats(__magic_name__ ) )
results.update(alpha_stats(__magic_name__ ) )
results.update(char_token_ratio(__magic_name__ ) )
results.update(is_autogenerated(__magic_name__ ) )
results.update(is_config_or_test(__magic_name__ ) )
results.update(has_no_keywords(__magic_name__ ) )
results.update(has_few_assignments(__magic_name__ ) )
return results
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> Tuple:
"""simple docstring"""
if not check_uniques(__magic_name__ ,__magic_name__ ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def __UpperCAmelCase ( __magic_name__ )-> Dict:
"""simple docstring"""
with open(__magic_name__ ,"rb" ) as f_in:
with gzip.open(str(__magic_name__ ) + ".gz" ,"wb" ,compresslevel=6 ) as f_out:
shutil.copyfileobj(__magic_name__ ,__magic_name__ )
os.unlink(__magic_name__ )
# Settings
__lowerCamelCase : List[Any] = HfArgumentParser(PreprocessingArguments)
__lowerCamelCase : str = parser.parse_args()
if args.num_workers is None:
__lowerCamelCase : List[Any] = multiprocessing.cpu_count()
__lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
__lowerCamelCase : Any = time.time()
__lowerCamelCase : str = load_dataset(args.dataset_name, split='''train''')
print(f'''Time to load dataset: {time.time()-t_start:.2f}''')
# Run preprocessing
__lowerCamelCase : List[str] = time.time()
__lowerCamelCase : Any = ds.map(preprocess, num_proc=args.num_workers)
print(f'''Time to preprocess dataset: {time.time()-t_start:.2f}''')
# Deduplicate hashes
__lowerCamelCase : Any = set(ds.unique('''hash'''))
__lowerCamelCase : Optional[int] = len(uniques) / len(ds)
print(f'''Fraction of duplicates: {1-frac:.2%}''')
# Deduplicate data and apply heuristics
__lowerCamelCase : List[str] = time.time()
__lowerCamelCase : Tuple = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args})
print(f'''Time to filter dataset: {time.time()-t_start:.2f}''')
print(f'''Size of filtered dataset: {len(ds_filter)}''')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
__lowerCamelCase : List[str] = time.time()
__lowerCamelCase , __lowerCamelCase : Tuple = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(f'''Time to deduplicate dataset: {time.time()-t_start:.2f}''')
print(f'''Size of deduplicated dataset: {len(ds_filter)}''')
# Save data in batches of samples_per_file
__lowerCamelCase : List[Any] = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place to save it
if args.near_deduplication:
with open(output_dir / '''duplicate_clusters.json''', '''w''') as f:
json.dump(duplicate_clusters, f)
__lowerCamelCase : List[str] = output_dir / '''data'''
data_dir.mkdir(exist_ok=True)
__lowerCamelCase : int = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
__lowerCamelCase : Union[str, Any] = str(data_dir / f'''file-{file_number+1:012}.json''')
__lowerCamelCase : List[Any] = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(f'''Time to save dataset: {time.time()-t_start:.2f}''')
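# Hedged usage sketch. The script name is illustrative; the flags are inferred
# from the `args.*` fields used above, which HfArgumentParser exposes as CLI
# options for each PreprocessingArguments field:
# python preprocessing.py --dataset_name <hub-dataset> --tokenizer_dir <tokenizer> \
#     --output_dir ./processed --num_workers 8 --near_deduplication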
| 656 | 1 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase : int = logging.get_logger(__name__)
__lowerCamelCase : Tuple = [
('''bert.bert''', '''visual_bert'''),
('''bert.cls''', '''cls'''),
('''bert.classifier''', '''cls'''),
('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''),
('''position_embeddings_visual''', '''visual_position_embeddings'''),
('''projection''', '''visual_projection'''),
]
__lowerCamelCase : Optional[Any] = [
'''nlvr2_coco_pre_trained.th''',
'''nlvr2_fine_tuned.th''',
'''nlvr2_pre_trained.th''',
'''vcr_coco_pre_train.th''',
'''vcr_fine_tune.th''',
'''vcr_pre_train.th''',
'''vqa_coco_pre_trained.th''',
'''vqa_fine_tuned.th''',
'''vqa_pre_trained.th''',
]
def __UpperCAmelCase ( __magic_name__ )-> str:
"""simple docstring"""
snake_case_ : List[Any] = torch.load(__magic_name__ ,map_location="cpu" )
return sd
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__=rename_keys_prefix )-> Optional[int]:
"""simple docstring"""
snake_case_ : int = OrderedDict()
snake_case_ : Dict = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
snake_case_ : Optional[Any] = key
for name_pair in rename_keys_prefix:
snake_case_ : Optional[Any] = new_key.replace(name_pair[0] ,name_pair[1] )
snake_case_ : Union[str, Any] = d[key]
if key == "bert.cls.predictions.decoder.weight":
            # Old BERT code didn't have `decoder.bias`, so it is added separately here
snake_case_ : Dict = new_d["cls.predictions.bias"]
return new_d
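# Small illustration of the prefix rewriting above, using the
# `rename_keys_prefix` pairs defined at the top of the file:
#   "bert.bert.encoder.layer.0.output.dense.weight"
#       -> "visual_bert.encoder.layer.0.output.dense.weight"
#   "bert.cls.predictions.decoder.weight" -> "cls.predictions.decoder.weight"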
@torch.no_grad()
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> Dict:
"""simple docstring"""
assert (
checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS
), F'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'''
# Get Config
if "pre" in checkpoint_path:
snake_case_ : List[str] = "pretraining"
if "vcr" in checkpoint_path:
snake_case_ : Union[str, Any] = {"visual_embedding_dim": 512}
elif "vqa_advanced" in checkpoint_path:
snake_case_ : List[str] = {"visual_embedding_dim": 2048}
elif "vqa" in checkpoint_path:
snake_case_ : Tuple = {"visual_embedding_dim": 2048}
elif "nlvr" in checkpoint_path:
snake_case_ : List[str] = {"visual_embedding_dim": 1024}
else:
raise NotImplementedError(F'''No implementation found for `{checkpoint_path}`.''' )
else:
if "vcr" in checkpoint_path:
snake_case_ : List[Any] = {"visual_embedding_dim": 512}
snake_case_ : str = "multichoice"
elif "vqa_advanced" in checkpoint_path:
snake_case_ : Tuple = {"visual_embedding_dim": 2048}
snake_case_ : Union[str, Any] = "vqa_advanced"
elif "vqa" in checkpoint_path:
snake_case_ : Optional[int] = {"visual_embedding_dim": 2048, "num_labels": 3129}
snake_case_ : Union[str, Any] = "vqa"
elif "nlvr" in checkpoint_path:
snake_case_ : str = {
"visual_embedding_dim": 1024,
"num_labels": 2,
}
snake_case_ : Dict = "nlvr"
snake_case_ : List[str] = VisualBertConfig(**__magic_name__ )
# Load State Dict
snake_case_ : Any = load_state_dict(__magic_name__ )
snake_case_ : Dict = get_new_dict(__magic_name__ ,__magic_name__ )
if model_type == "pretraining":
snake_case_ : Dict = VisualBertForPreTraining(__magic_name__ )
elif model_type == "vqa":
snake_case_ : List[Any] = VisualBertForQuestionAnswering(__magic_name__ )
elif model_type == "nlvr":
snake_case_ : Union[str, Any] = VisualBertForVisualReasoning(__magic_name__ )
elif model_type == "multichoice":
snake_case_ : List[Any] = VisualBertForMultipleChoice(__magic_name__ )
model.load_state_dict(__magic_name__ )
# Save Checkpoints
Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ )
model.save_pretrained(__magic_name__ )
if __name__ == "__main__":
__lowerCamelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''')
__lowerCamelCase : List[Any] = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
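    # Hedged usage example (script and output names are illustrative; the
    # checkpoint file must be one of ACCEPTABLE_CHECKPOINTS):
    # python convert_visual_bert_checkpoint.py vqa_pre_trained.th ./visualbert-vqa-pretrained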
| 656 |
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class A_ (unittest.TestCase ):
"""simple docstring"""
def _A ( self :str ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = torch.nn.Linear(10 , 10 )
snake_case_ : Dict = torch.optim.SGD(model.parameters() , 0.1 )
snake_case_ : Tuple = Accelerator()
snake_case_ : Optional[Any] = accelerator.prepare(lowerCAmelCase__ )
try:
pickle.loads(pickle.dumps(lowerCAmelCase__ ) )
except Exception as e:
self.fail(F'''Accelerated optimizer pickling failed with {e}''' )
AcceleratorState._reset_state()
| 656 | 1 |
'''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
__lowerCamelCase : str = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class A_ :
"""simple docstring"""
def __init__( self :List[str] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[int]=16 , lowerCAmelCase__ :Tuple=13 , lowerCAmelCase__ :int=7 , lowerCAmelCase__ :Dict=14 , lowerCAmelCase__ :int=10 , lowerCAmelCase__ :str=19 , lowerCAmelCase__ :Optional[Any]=5 , lowerCAmelCase__ :int=4 , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :Any=16 , lowerCAmelCase__ :Optional[int]=2 , lowerCAmelCase__ :str=4 , lowerCAmelCase__ :Union[str, Any]=4 , lowerCAmelCase__ :int="gelu" , lowerCAmelCase__ :Dict=0.1 , lowerCAmelCase__ :int=0.1 , lowerCAmelCase__ :Optional[int]=[1, 2, 3, 4, 5] , lowerCAmelCase__ :Dict=25 , lowerCAmelCase__ :Any=5 , ) -> Tuple:
'''simple docstring'''
snake_case_ : Optional[Any] = d_model
snake_case_ : Any = parent
snake_case_ : Tuple = batch_size
snake_case_ : Tuple = prediction_length
snake_case_ : List[str] = context_length
snake_case_ : List[str] = cardinality
snake_case_ : Optional[Any] = num_time_features
snake_case_ : Dict = lags_sequence
snake_case_ : Union[str, Any] = embedding_dimension
snake_case_ : List[str] = is_training
snake_case_ : int = hidden_size
snake_case_ : int = num_hidden_layers
snake_case_ : Dict = num_attention_heads
snake_case_ : Optional[Any] = intermediate_size
snake_case_ : List[str] = hidden_act
snake_case_ : int = hidden_dropout_prob
snake_case_ : str = attention_probs_dropout_prob
snake_case_ : Optional[int] = context_length
snake_case_ : Dict = prediction_length + label_length
snake_case_ : Dict = label_length
snake_case_ : Union[str, Any] = moving_average
snake_case_ : int = autocorrelation_factor
def _A ( self :Union[str, Any] ) -> str:
'''simple docstring'''
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def _A ( self :Optional[int] , lowerCAmelCase__ :Union[str, Any] ) -> List[Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = config.context_length + max(config.lags_sequence )
snake_case_ : Union[str, Any] = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
snake_case_ : Union[str, Any] = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
snake_case_ : Any = floats_tensor([self.batch_size, _past_length] )
snake_case_ : List[str] = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
snake_case_ : Tuple = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
snake_case_ : Optional[Any] = floats_tensor([self.batch_size, config.prediction_length] )
snake_case_ : Optional[Any] = {
"past_values": past_values,
"static_categorical_features": static_categorical_features,
"past_time_features": past_time_features,
"past_observed_mask": past_observed_mask,
"future_time_features": future_time_features,
"future_values": future_values,
}
return inputs_dict
def _A ( self :Optional[int] ) -> Dict:
'''simple docstring'''
snake_case_ : Optional[int] = self.get_config()
snake_case_ : Any = self.prepare_autoformer_inputs_dict(lowerCAmelCase__ )
return config, inputs_dict
def _A ( self :Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
snake_case_, snake_case_ : Optional[int] = self.prepare_config_and_inputs()
return config, inputs_dict
def _A ( self :Optional[int] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : int = AutoformerModel(config=lowerCAmelCase__ ).to(lowerCAmelCase__ ).eval()
snake_case_ : Union[str, Any] = model(**lowerCAmelCase__ )
snake_case_ : Dict = outputs.encoder_last_hidden_state
snake_case_ : Optional[Any] = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case_ : str = model.get_encoder()
encoder.save_pretrained(lowerCAmelCase__ )
snake_case_ : List[str] = AutoformerEncoder.from_pretrained(lowerCAmelCase__ ).to(lowerCAmelCase__ )
snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ : str = model.create_network_inputs(**lowerCAmelCase__ )
snake_case_, snake_case_ : Any = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
snake_case_ : List[str] = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
snake_case_ : str = encoder(inputs_embeds=lowerCAmelCase__ )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
snake_case_ : Optional[int] = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
snake_case_ : List[Any] = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
snake_case_ : int = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
snake_case_ : Any = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case_ : Optional[int] = model.get_decoder()
decoder.save_pretrained(lowerCAmelCase__ )
snake_case_ : int = AutoformerDecoder.from_pretrained(lowerCAmelCase__ ).to(lowerCAmelCase__ )
snake_case_ : Union[str, Any] = decoder(
trend=lowerCAmelCase__ , inputs_embeds=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class A_ (a_ , a_ , unittest.TestCase ):
"""simple docstring"""
a__ = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
a__ = (AutoformerForPrediction,) if is_torch_available() else ()
a__ = {'''feature-extraction''': AutoformerModel} if is_torch_available() else {}
a__ = False
a__ = False
a__ = False
a__ = False
a__ = False
a__ = False
def _A ( self :int ) -> List[Any]:
'''simple docstring'''
snake_case_ : Optional[int] = AutoformerModelTester(self )
snake_case_ : Optional[Any] = ConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ )
def _A ( self :Tuple ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def _A ( self :Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
snake_case_, snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
snake_case_ : Any = model_class(lowerCAmelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCAmelCase__ )
snake_case_, snake_case_ : int = model_class.from_pretrained(lowerCAmelCase__ , output_loading_info=lowerCAmelCase__ )
self.assertEqual(info["missing_keys"] , [] )
def _A ( self :Optional[int] ) -> str:
'''simple docstring'''
snake_case_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*lowerCAmelCase__ )
@unittest.skip(reason="Model has no tokens embeddings" )
def _A ( self :Optional[int] ) -> Optional[Any]:
'''simple docstring'''
pass
def _A ( self :Dict ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : List[str] = inspect.signature(getattr(lowerCAmelCase__ , "forward" ) )
# The main input is the name of the argument after `self`
snake_case_ : Any = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , lowerCAmelCase__ )
def _A ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
snake_case_, snake_case_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : Optional[Any] = model_class(lowerCAmelCase__ )
snake_case_ : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ : Tuple = [*signature.parameters.keys()]
snake_case_ : Optional[int] = [
"past_values",
"past_time_features",
"past_observed_mask",
"static_categorical_features",
"static_real_features",
"future_values",
"future_time_features",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("future_observed_mask" )
expected_arg_names.extend(
[
"decoder_attention_mask",
"head_mask",
"decoder_head_mask",
"cross_attn_head_mask",
"encoder_outputs",
"past_key_values",
"output_hidden_states",
"output_attentions",
"use_cache",
"return_dict",
] )
self.assertListEqual(arg_names[: len(lowerCAmelCase__ )] , lowerCAmelCase__ )
def _A ( self :Dict ) -> Union[str, Any]:
'''simple docstring'''
snake_case_, snake_case_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : List[str] = True
snake_case_ : Dict = getattr(self.model_tester , "seq_length" , lowerCAmelCase__ )
snake_case_ : str = getattr(self.model_tester , "decoder_seq_length" , lowerCAmelCase__ )
snake_case_ : int = getattr(self.model_tester , "encoder_seq_length" , lowerCAmelCase__ )
snake_case_ : Tuple = getattr(self.model_tester , "d_model" , lowerCAmelCase__ )
snake_case_ : int = getattr(self.model_tester , "num_attention_heads" , lowerCAmelCase__ )
snake_case_ : Optional[int] = d_model // num_attention_heads
for model_class in self.all_model_classes:
snake_case_ : int = True
snake_case_ : Dict = False
snake_case_ : Optional[Any] = True
snake_case_ : Optional[Any] = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
snake_case_ : List[str] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
snake_case_ : Optional[int] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
            # check that output_attentions also works using the config
del inputs_dict["output_attentions"]
snake_case_ : str = True
snake_case_ : Optional[int] = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
snake_case_ : Any = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
snake_case_ : List[Any] = outputs.encoder_attentions
self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
snake_case_ : int = len(lowerCAmelCase__ )
snake_case_ : Dict = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
# decoder attentions
snake_case_ : Any = outputs.decoder_attentions
self.assertIsInstance(lowerCAmelCase__ , (list, tuple) )
self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
snake_case_ : List[Any] = outputs.cross_attentions
self.assertIsInstance(lowerCAmelCase__ , (list, tuple) )
self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
snake_case_ : List[Any] = True
snake_case_ : Union[str, Any] = True
snake_case_ : Dict = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
snake_case_ : List[str] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
self.assertEqual(out_len + 2 , len(lowerCAmelCase__ ) )
snake_case_ : Optional[Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def _A ( self :List[Any] ) -> List[str]:
'''simple docstring'''
super().test_retain_grad_hidden_states_attentions()
def __UpperCAmelCase ( __magic_name__="train-batch.pt" )-> int:
"""simple docstring"""
snake_case_ : Any = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch" ,filename=__magic_name__ ,repo_type="dataset" )
snake_case_ : str = torch.load(__magic_name__ ,map_location=__magic_name__ )
return batch
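# Note: `prepare_batch` downloads a pre-serialized batch from the
# hf-internal-testing/tourism-monthly-batch dataset repo, so the slow
# integration tests below run against fixed inputs.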
@require_torch
@slow
class A_ (unittest.TestCase ):
"""simple docstring"""
def _A ( self :Dict ) -> Tuple:
'''simple docstring'''
snake_case_ : Union[str, Any] = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(lowerCAmelCase__ )
snake_case_ : int = prepare_batch()
with torch.no_grad():
snake_case_ : Dict = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , future_values=batch["future_values"] , future_time_features=batch["future_time_features"] , )[0]
snake_case_ : int = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , lowerCAmelCase__ )
snake_case_ : Any = torch.tensor(
[[0.3_5_9_3, -1.3_3_9_8, 0.6_3_3_0], [0.2_2_7_9, 1.5_3_9_6, -0.1_7_9_2], [0.0_4_5_0, 1.3_2_2_5, -0.2_3_3_5]] , device=lowerCAmelCase__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) )
def _A ( self :Union[str, Any] ) -> str:
'''simple docstring'''
snake_case_ : str = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(lowerCAmelCase__ )
snake_case_ : Tuple = prepare_batch("val-batch.pt" )
with torch.no_grad():
snake_case_ : Any = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , ).encoder_last_hidden_state
snake_case_ : Optional[int] = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , lowerCAmelCase__ )
snake_case_ : List[Any] = torch.tensor(
[[-0.0_7_3_4, -0.9_0_3_6, 0.8_3_5_8], [4.7_1_8_6, 2.4_1_1_3, 1.9_5_8_1], [1.7_9_5_3, 2.3_5_5_8, 1.2_9_7_0]] , device=lowerCAmelCase__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) )
def _A ( self :List[str] ) -> List[Any]:
'''simple docstring'''
snake_case_ : str = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(lowerCAmelCase__ )
snake_case_ : int = prepare_batch("val-batch.pt" )
with torch.no_grad():
snake_case_ : Union[str, Any] = model.generate(
static_categorical_features=batch["static_categorical_features"] , past_time_features=batch["past_time_features"] , past_values=batch["past_values"] , future_time_features=batch["future_time_features"] , past_observed_mask=batch["past_observed_mask"] , )
snake_case_ : Tuple = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , lowerCAmelCase__ )
snake_case_ : str = torch.tensor([3_1_3_0.6_7_6_3, 4_0_5_6.5_2_9_3, 7_0_5_3.0_7_8_6] , device=lowerCAmelCase__ )
snake_case_ : Union[str, Any] = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , lowerCAmelCase__ , rtol=1E-1 ) )
| 656 |
'''simple docstring'''
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__lowerCamelCase : Any = '''src/transformers'''
# This is to make sure the transformers module imported is the one in the repo.
__lowerCamelCase : List[str] = direct_transformers_import(PATH_TO_TRANSFORMERS)
__lowerCamelCase : Optional[Any] = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
__lowerCamelCase : Union[str, Any] = re.compile(R'''\[(.+?)\]\((https://huggingface\.co/.+?)\)''')
__lowerCamelCase : Any = {
'''DecisionTransformerConfig''',
'''EncoderDecoderConfig''',
'''MusicgenConfig''',
'''RagConfig''',
'''SpeechEncoderDecoderConfig''',
'''TimmBackboneConfig''',
'''VisionEncoderDecoderConfig''',
'''VisionTextDualEncoderConfig''',
'''LlamaConfig''',
}
def __UpperCAmelCase ( __magic_name__ )-> List[Any]:
"""simple docstring"""
snake_case_ : Tuple = None
# source code of `config_class`
snake_case_ : List[Any] = inspect.getsource(__magic_name__ )
snake_case_ : List[str] = _re_checkpoint.findall(__magic_name__ )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith("/" ):
snake_case_ : Optional[Any] = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
snake_case_ : str = F'''https://huggingface.co/{ckpt_name}'''
if ckpt_link == ckpt_link_from_name:
snake_case_ : Dict = ckpt_name
break
return checkpoint
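# Quick check of the regex above against the example given in the module
# comment:
# _re_checkpoint.findall("[bert-base-uncased](https://huggingface.co/bert-base-uncased)")
# == [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")]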
def __UpperCAmelCase ( )-> Dict:
"""simple docstring"""
snake_case_ : Optional[int] = []
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
snake_case_ : str = get_checkpoint_from_config_class(__magic_name__ )
snake_case_ : Union[str, Any] = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(__magic_name__ )
if len(__magic_name__ ) > 0:
snake_case_ : Tuple = "\n".join(sorted(__magic_name__ ) )
raise ValueError(F'''The following configurations don\'t contain any valid checkpoint:\n{message}''' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 656 | 1 |
'''simple docstring'''
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def __UpperCAmelCase ( __magic_name__ = "isbn/0140328726" )-> dict:
"""simple docstring"""
snake_case_ : Optional[Any] = olid.strip().strip("/" ) # Remove leading/trailing whitespace & slashes
if new_olid.count("/" ) != 1:
snake_case_ : Optional[Any] = F'''{olid} is not a valid Open Library olid'''
raise ValueError(__magic_name__ )
return requests.get(F'''https://openlibrary.org/{new_olid}.json''' ).json()
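# Example behaviour (network access required): the default olid
# "isbn/0140328726" fetches https://openlibrary.org/isbn/0140328726.json,
# while an olid with anything other than exactly one "/" raises ValueError.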
def __UpperCAmelCase ( __magic_name__ )-> dict:
"""simple docstring"""
snake_case_ : Dict = {
"title": "Title",
"publish_date": "Publish date",
"authors": "Authors",
"number_of_pages": "Number of pages:",
"first_sentence": "First sentence",
"isbn_10": "ISBN (10)",
"isbn_13": "ISBN (13)",
}
snake_case_ : int = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
snake_case_ : str = [
get_openlibrary_data(author["key"] )["name"] for author in data["Authors"]
]
snake_case_ : str = data["First sentence"]["value"]
for key, value in data.items():
if isinstance(__magic_name__ ,__magic_name__ ):
snake_case_ : Dict = ", ".join(__magic_name__ )
return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
__lowerCamelCase : List[Any] = input('''\nEnter the ISBN code to search (or \'quit\' to stop): ''').strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(f'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''')
continue
print(f'''\nSearching Open Library for ISBN: {isbn}...\n''')
try:
__lowerCamelCase : Tuple = summarize_book(get_openlibrary_data(f'''isbn/{isbn}'''))
print('''\n'''.join(f'''{key}: {value}''' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(f'''Sorry, there are no results for ISBN: {isbn}.''')
| 656 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : List[str] = logging.get_logger(__name__)
__lowerCamelCase : int = {
'''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class A_ (a_ ):
"""simple docstring"""
a__ = '''cvt'''
def __init__( self :List[Any] , lowerCAmelCase__ :Optional[int]=3 , lowerCAmelCase__ :Any=[7, 3, 3] , lowerCAmelCase__ :Dict=[4, 2, 2] , lowerCAmelCase__ :Union[str, Any]=[2, 1, 1] , lowerCAmelCase__ :Any=[64, 192, 384] , lowerCAmelCase__ :List[str]=[1, 3, 6] , lowerCAmelCase__ :str=[1, 2, 10] , lowerCAmelCase__ :Any=[4.0, 4.0, 4.0] , lowerCAmelCase__ :int=[0.0, 0.0, 0.0] , lowerCAmelCase__ :Optional[Any]=[0.0, 0.0, 0.0] , lowerCAmelCase__ :Dict=[0.0, 0.0, 0.1] , lowerCAmelCase__ :List[Any]=[True, True, True] , lowerCAmelCase__ :List[Any]=[False, False, True] , lowerCAmelCase__ :Dict=["dw_bn", "dw_bn", "dw_bn"] , lowerCAmelCase__ :Any=[3, 3, 3] , lowerCAmelCase__ :Tuple=[1, 1, 1] , lowerCAmelCase__ :Optional[int]=[2, 2, 2] , lowerCAmelCase__ :Union[str, Any]=[1, 1, 1] , lowerCAmelCase__ :Any=[1, 1, 1] , lowerCAmelCase__ :List[str]=0.0_2 , lowerCAmelCase__ :Dict=1E-1_2 , **lowerCAmelCase__ :Optional[Any] , ) -> str:
'''simple docstring'''
super().__init__(**lowerCAmelCase__ )
snake_case_ : int = num_channels
snake_case_ : int = patch_sizes
snake_case_ : Optional[Any] = patch_stride
snake_case_ : Dict = patch_padding
snake_case_ : Tuple = embed_dim
snake_case_ : Optional[int] = num_heads
snake_case_ : Union[str, Any] = depth
snake_case_ : Optional[int] = mlp_ratio
snake_case_ : Tuple = attention_drop_rate
snake_case_ : str = drop_rate
snake_case_ : Tuple = drop_path_rate
snake_case_ : Any = qkv_bias
snake_case_ : Union[str, Any] = cls_token
snake_case_ : int = qkv_projection_method
snake_case_ : Any = kernel_qkv
snake_case_ : Union[str, Any] = padding_kv
snake_case_ : str = stride_kv
snake_case_ : Dict = padding_q
snake_case_ : Tuple = stride_q
snake_case_ : Any = initializer_range
snake_case_ : Any = layer_norm_eps
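# Hedged usage sketch (the obfuscated class `A_` corresponds to `CvtConfig`;
# attribute names follow the constructor arguments above):
# config = CvtConfig()  # three-stage defaults
# config.embed_dim -> [64, 192, 384]
# config.num_heads -> [1, 3, 6]
# config.depth -> [1, 2, 10]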
| 656 | 1 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
__lowerCamelCase : Optional[Any] = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
__lowerCamelCase : str = 128022
__lowerCamelCase : List[Any] = 128028
@require_sentencepiece
class A_ (a_ , unittest.TestCase ):
"""simple docstring"""
a__ = MaMaaaTokenizer
a__ = False
a__ = False
a__ = True
def _A ( self :Union[str, Any] ) -> List[str]:
'''simple docstring'''
super().setUp()
snake_case_ : int = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
snake_case_ : Any = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
snake_case_ : Optional[int] = Path(self.tmpdirname )
save_json(lowerCAmelCase__ , save_dir / VOCAB_FILES_NAMES["vocab_file"] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(lowerCAmelCase__ , save_dir / VOCAB_FILES_NAMES["spm_file"] )
snake_case_ : Union[str, Any] = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def _A ( self :List[Any] , **lowerCAmelCase__ :List[Any] ) -> str:
'''simple docstring'''
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def _A ( self :Optional[int] , lowerCAmelCase__ :Any ) -> Optional[int]:
'''simple docstring'''
return (
"This is a test",
"This is a test",
)
def _A ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : str = "</s>"
snake_case_ : Union[str, Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__ ) , lowerCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__ ) , lowerCAmelCase__ )
def _A ( self :Union[str, Any] ) -> List[str]:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.get_tokenizer()
snake_case_ : Any = list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "</s>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "<s>" )
self.assertEqual(len(lowerCAmelCase__ ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip("Skip this test while all models are still to be uploaded." )
def _A ( self :List[Any] ) -> Union[str, Any]:
'''simple docstring'''
pass
def _A ( self :Optional[int] ) -> int:
'''simple docstring'''
snake_case_ : int = self.get_tokenizer()
snake_case_ : List[str] = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowerCAmelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [2, 3, 4, 5, 6] , )
snake_case_ : Any = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(lowerCAmelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
snake_case_ : Any = tokenizer.convert_tokens_to_string(lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , "This is a test" )
@slow
def _A ( self :Any ) -> List[Any]:
'''simple docstring'''
snake_case_ : int = {"input_ids": [[128_022, 110_108, 397, 11, 38_272, 2_247, 124_811, 285, 18_105, 1_586, 207, 7, 39_534, 4_428, 397, 1_019, 18_105, 1_586, 207, 7, 41_337, 16_786, 241, 7, 20_214, 17, 125_690, 10_398, 7, 44_378, 58_069, 68_342, 7_798, 7_343, 11, 299, 33_310, 4, 158, 37_350, 94_077, 4_569, 299, 33_310, 90, 4, 52_840, 290, 4, 31_270, 112, 299, 682, 4, 52_840, 39_953, 14_079, 193, 52_519, 90_894, 17_894, 120_697, 11, 40_445, 551, 17, 1_019, 52_519, 90_894, 17_756, 963, 11, 40_445, 480, 17, 9_792, 1_120, 5_173, 1_393, 6_240, 16_786, 241, 120_996, 28, 1_245, 1_393, 118_240, 11_123, 1_019, 93_612, 2_691, 10_618, 98_058, 120_409, 1_928, 279, 4, 40_683, 367, 178, 207, 1_019, 103, 103_121, 506, 65_296, 5, 2], [128_022, 21_217, 367, 117, 125_450, 128, 719, 7, 7_308, 40, 93_612, 12_669, 1_116, 16_704, 71, 17_785, 3_699, 15_592, 35, 144, 9_584, 241, 11_943, 713, 950, 799, 2_247, 88_427, 150, 149, 118_813, 120_706, 1_019, 106_906, 81_518, 28, 1_224, 22_799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128_022, 1_658, 123_311, 5_155, 5_578, 4_722, 279, 14_947, 2_366, 1_120, 1_197, 14, 1_348, 9_232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__ , model_name="facebook/m2m100_418M" , revision="c168bae485c864188cf9aa0e4108b0b6934dc91e" , )
@require_torch
@require_sentencepiece
@require_tokenizers
class A_ (unittest.TestCase ):
"""simple docstring"""
a__ = '''facebook/m2m100_418M'''
a__ = [
'''In my opinion, there are two levels of response from the French government.''',
'''NSA Affair Emphasizes Complete Lack of Debate on Intelligence''',
]
a__ = [
'''Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.''',
'''L\'affaire NSA souligne l\'absence totale de débat sur le renseignement''',
]
# fmt: off
a__ = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
@classmethod
def _A ( cls :str ) -> int:
'''simple docstring'''
snake_case_ : MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="en" , tgt_lang="fr" )
snake_case_ : List[str] = 1
return cls
def _A ( self :Tuple ) -> Union[str, Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.get_lang_id("ar" ) , 128_006 )
self.assertEqual(self.tokenizer.get_lang_id("en" ) , 128_022 )
self.assertEqual(self.tokenizer.get_lang_id("ro" ) , 128_076 )
self.assertEqual(self.tokenizer.get_lang_id("mr" ) , 128_063 )
def _A ( self :Optional[int] ) -> List[str]:
'''simple docstring'''
snake_case_ : Dict = self.tokenizer.get_vocab()
self.assertEqual(len(lowerCAmelCase__ ) , self.tokenizer.vocab_size )
self.assertEqual(vocab["<unk>"] , 3 )
self.assertIn(self.tokenizer.get_lang_token("en" ) , lowerCAmelCase__ )
def _A ( self :Any ) -> Dict:
'''simple docstring'''
snake_case_ : List[str] = "en"
snake_case_ : Dict = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase__ )
def _A ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
self.assertIn(lowerCAmelCase__ , self.tokenizer.all_special_ids )
# fmt: off
snake_case_ : Dict = [FR_CODE, 5_364, 82, 8_642, 4, 294, 47, 8, 14_028, 136, 3_286, 9_706, 6, 90_797, 6, 144_012, 162, 88_128, 30_061, 5, 2]
# fmt: on
snake_case_ : List[str] = self.tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
snake_case_ : str = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertNotIn(self.tokenizer.eos_token , lowerCAmelCase__ )
def _A ( self :Tuple ) -> Tuple:
'''simple docstring'''
snake_case_ : Union[str, Any] = tempfile.mkdtemp()
snake_case_ : int = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(lowerCAmelCase__ )
snake_case_ : List[str] = MaMaaaTokenizer.from_pretrained(lowerCAmelCase__ )
self.assertDictEqual(new_tok.lang_token_to_id , lowerCAmelCase__ )
@require_torch
def _A ( self :Optional[Any] ) -> str:
'''simple docstring'''
snake_case_ : Union[str, Any] = "en"
snake_case_ : Tuple = "fr"
snake_case_ : Optional[int] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase__ , return_tensors="pt" )
snake_case_ : Dict = shift_tokens_right(
batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
for k in batch:
snake_case_ : str = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def _A ( self :Optional[Any] ) -> Tuple:
'''simple docstring'''
snake_case_ : List[str] = "mr"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
snake_case_ : int = "zh"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
def _A ( self :str ) -> int:
'''simple docstring'''
snake_case_ : Dict = "mr"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
snake_case_ : Tuple = "zh"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def _A ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Optional[int] = self.tokenizer._build_translation_inputs("A test" , return_tensors="pt" , src_lang="en" , tgt_lang="ar" )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
# en_XX, A, test, EOS
"input_ids": [[128_022, 58, 4_183, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 128_006,
} , )
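
# A usage sketch (not part of the test suite) of the flow the assertion above checks,
# written with the upstream class names rather than the renamed ones in this file;
# the decoded output is illustrative only:
#
#   from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer
#
#   tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en")
#   model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
#   inputs = tokenizer("A test", return_tensors="pt")
#   # forcing the Arabic language token as BOS mirrors forced_bos_token_id=128_006 above
#   generated = model.generate(**inputs, forced_bos_token_id=tokenizer.get_lang_id("ar"))
#   print(tokenizer.batch_decode(generated, skip_special_tokens=True))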
| 656 |
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
__lowerCamelCase : str = '''\
@inproceedings{snover-etal-2006-study,
title = "A Study of Translation Edit Rate with Targeted Human Annotation",
author = "Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John",
booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
month = aug # " 8-12",
year = "2006",
address = "Cambridge, Massachusetts, USA",
publisher = "Association for Machine Translation in the Americas",
url = "https://aclanthology.org/2006.amta-papers.25",
pages = "223--231",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
__lowerCamelCase : Dict = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''
__lowerCamelCase : int = '''
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
    ignore_punct (boolean): If `True`, removes punctuation before scoring (passed to sacrebleu as `no_punct`). Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
\'score\' (float): TER score (num_edits / sum_ref_lengths * 100)
\'num_edits\' (int): The cumulative number of edits
\'ref_length\' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}
Example 2:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}
Example 3:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}
Example 4:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}
Example 5:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ (datasets.Metric ):
"""simple docstring"""
def _A ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
if version.parse(scb.__version__ ) < version.parse("1.4.12" ):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
"You can install it with `pip install \"sacrebleu>=1.4.12\"`." )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="http://www.cs.umd.edu/~snover/tercom/" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"] , reference_urls=[
"https://github.com/jhclark/tercom",
] , )
def _A ( self :Any , lowerCAmelCase__ :Any , lowerCAmelCase__ :str , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :bool = False , ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : List[str] = len(references[0] )
if any(len(lowerCAmelCase__ ) != references_per_prediction for refs in references ):
raise ValueError("Sacrebleu requires the same number of references for each prediction" )
snake_case_ : List[str] = [[refs[i] for refs in references] for i in range(lowerCAmelCase__ )]
snake_case_ : List[str] = TER(
normalized=lowerCAmelCase__ , no_punct=lowerCAmelCase__ , asian_support=lowerCAmelCase__ , case_sensitive=lowerCAmelCase__ , )
snake_case_ : Any = sb_ter.corpus_score(lowerCAmelCase__ , lowerCAmelCase__ )
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
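

# A minimal standalone sketch (assumes sacrebleu is installed, which the import at the
# top already requires) of the transpose performed in the compute method above:
# references arrive grouped per prediction and are regrouped into one stream per
# reference position before being handed to sacrebleu.
if __name__ == "__main__":
    predictions = ["does this sentence match??", "what about this sentence?"]
    references = [
        ["does this sentence match", "does this sentence match!?!"],
        ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
    ]
    # group the i-th reference of every prediction into a single stream
    transformed = [[refs[i] for refs in references] for i in range(len(references[0]))]
    print(TER(case_sensitive=True).corpus_score(predictions, transformed))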
| 656 | 1 |
'''simple docstring'''
import socket


def main() -> None:
    """Connect to a local file server and save the stream it sends back."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")


if __name__ == "__main__":
    main()
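

# A minimal matching server sketch (an assumption; only the client exists in this file):
# it listens on the same port, reads the greeting, and streams a local file back,
# closing the connection so the client's recv loop terminates.
#
#   import socket
#
#   server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   server.bind((socket.gethostname(), 12312))
#   server.listen(1)
#   conn, _addr = server.accept()
#   print(conn.recv(1024))  # b"Hello server!"
#   with open("file_to_send", "rb") as in_file:  # hypothetical file name
#       while chunk := in_file.read(1024):
#           conn.send(chunk)
#   conn.close()
#   server.close()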
| 656 |
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def __UpperCAmelCase ( )-> int:
"""simple docstring"""
snake_case_ : Any = {
"repo_name": ["test_repo1", "test_repo2", "test_repo3"],
"path": ["test_1.py", "test_2.py", "unit_test.py"],
"content": ["a " * 20, "a " * 30, "b " * 7],
}
snake_case_ : int = Dataset.from_dict(__magic_name__ )
return dataset
class A_ (a_ ):
"""simple docstring"""
def _A ( self :List[str] ) -> str:
'''simple docstring'''
snake_case_ : Union[str, Any] = get_dataset()
snake_case_ : Optional[int] = make_duplicate_clusters(lowerCAmelCase__ , 0.8_5 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def _A ( self :Union[str, Any] ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[int] = get_dataset()
snake_case_, snake_case_ : List[Any] = deduplicate_dataset(lowerCAmelCase__ )
self.assertEqual(len(lowerCAmelCase__ ) , 2 )
print(lowerCAmelCase__ )
self.assertEqual(duplicate_clusters[0][0]["copies"] , 2 )
self.assertEqual(duplicate_clusters[0][0]["is_extreme"] , lowerCAmelCase__ )
| 656 | 1 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def __UpperCAmelCase ( __magic_name__ )-> List[Any]:
"""simple docstring"""
snake_case_ : Optional[int] = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
snake_case_ : List[Any] = True if "large" in model_name or "huge" in model_name else False
snake_case_ : List[Any] = True if "large" in model_name or "huge" in model_name else False
snake_case_ : int = True if "large" in model_name or "huge" in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
snake_case_ : List[Any] = [3, 3, 3, 3]
snake_case_ : Any = [5, 5, 5, 5]
elif "fl4" in model_name:
snake_case_ : int = [4, 4, 4, 4]
snake_case_ : Optional[Any] = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
snake_case_ : str = [3, 3, 3, 3]
if "lrf" in model_name:
snake_case_ : List[Any] = [3, 3, 3, 3]
else:
snake_case_ : List[Any] = [2, 2, 2, 2]
if "tiny" in model_name:
snake_case_ : List[Any] = 96
elif "small" in model_name:
snake_case_ : Any = 96
elif "base" in model_name:
snake_case_ : str = 128
elif "large" in model_name:
snake_case_ : List[Any] = 192
elif "xlarge" in model_name:
snake_case_ : int = 256
elif "huge" in model_name:
snake_case_ : str = 352
# set label information
snake_case_ : int = "huggingface/label-files"
if "large" in model_name or "huge" in model_name:
snake_case_ : List[Any] = "imagenet-22k-id2label.json"
else:
snake_case_ : List[str] = "imagenet-1k-id2label.json"
snake_case_ : Union[str, Any] = json.load(open(hf_hub_download(__magic_name__ ,__magic_name__ ,repo_type="dataset" ) ,"r" ) )
snake_case_ : str = {int(__magic_name__ ): v for k, v in idalabel.items()}
snake_case_ : str = {v: k for k, v in idalabel.items()}
snake_case_ : int = FocalNetConfig(
embed_dim=__magic_name__ ,depths=__magic_name__ ,focal_levels=__magic_name__ ,focal_windows=__magic_name__ ,use_conv_embed=__magic_name__ ,idalabel=__magic_name__ ,labelaid=__magic_name__ ,use_post_layernorm=__magic_name__ ,use_layerscale=__magic_name__ ,)
return config
def __UpperCAmelCase ( __magic_name__ )-> List[str]:
"""simple docstring"""
if "patch_embed.proj" in name:
snake_case_ : Union[str, Any] = name.replace("patch_embed.proj" ,"embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
snake_case_ : Tuple = name.replace("patch_embed.norm" ,"embeddings.norm" )
if "layers" in name:
snake_case_ : Optional[int] = "encoder." + name
if "encoder.layers" in name:
snake_case_ : Tuple = name.replace("encoder.layers" ,"encoder.stages" )
if "downsample.proj" in name:
snake_case_ : Tuple = name.replace("downsample.proj" ,"downsample.projection" )
if "blocks" in name:
snake_case_ : int = name.replace("blocks" ,"layers" )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
snake_case_ : List[Any] = name.replace("modulation.f" ,"modulation.projection_in" )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
snake_case_ : str = name.replace("modulation.h" ,"modulation.projection_context" )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
snake_case_ : int = name.replace("modulation.proj" ,"modulation.projection_out" )
if name == "norm.weight":
snake_case_ : Any = "layernorm.weight"
if name == "norm.bias":
snake_case_ : Any = "layernorm.bias"
if "head" in name:
snake_case_ : Optional[Any] = name.replace("head" ,"classifier" )
else:
snake_case_ : List[str] = "focalnet." + name
return name
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__=False )-> Dict:
"""simple docstring"""
    # fmt: off
    snake_case_ : Tuple = {
"focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
"focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
"focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
"focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
"focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
"focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
"focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
"focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
"focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
"focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
}
# fmt: on
snake_case_ : Any = model_name_to_url[model_name]
print("Checkpoint URL: " ,__magic_name__ )
snake_case_ : Union[str, Any] = torch.hub.load_state_dict_from_url(__magic_name__ ,map_location="cpu" )["model"]
# rename keys
for key in state_dict.copy().keys():
snake_case_ : Optional[int] = state_dict.pop(__magic_name__ )
snake_case_ : List[str] = val
snake_case_ : str = get_focalnet_config(__magic_name__ )
snake_case_ : List[Any] = FocalNetForImageClassification(__magic_name__ )
model.eval()
# load state dict
model.load_state_dict(__magic_name__ )
# verify conversion
snake_case_ : Tuple = "http://images.cocodataset.org/val2017/000000039769.jpg"
snake_case_ : Any = BitImageProcessor(
do_resize=__magic_name__ ,size={"shortest_edge": 256} ,resample=PILImageResampling.BILINEAR ,do_center_crop=__magic_name__ ,crop_size=224 ,do_normalize=__magic_name__ ,image_mean=__magic_name__ ,image_std=__magic_name__ ,)
snake_case_ : Any = Image.open(requests.get(__magic_name__ ,stream=__magic_name__ ).raw )
snake_case_ : Tuple = processor(images=__magic_name__ ,return_tensors="pt" )
snake_case_ : Union[str, Any] = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406] ,std=[0.229, 0.224, 0.225] ),
] )
snake_case_ : Optional[Any] = image_transforms(__magic_name__ ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values ,__magic_name__ ,atol=1E-4 )
snake_case_ : int = model(**__magic_name__ )
snake_case_ : Dict = outputs.logits.argmax(-1 ).item()
print("Predicted class:" ,model.config.idalabel[predicted_class_idx] )
print("First values of logits:" ,outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
snake_case_ : Tuple = torch.tensor([0.2_166, -0.4_368, 0.2_191] )
elif model_name == "focalnet-tiny-lrf":
snake_case_ : Optional[Any] = torch.tensor([1.1_669, 0.0_125, -0.1_695] )
elif model_name == "focalnet-small":
snake_case_ : List[Any] = torch.tensor([0.4_917, -0.0_430, 0.1_341] )
elif model_name == "focalnet-small-lrf":
snake_case_ : Optional[int] = torch.tensor([-0.2_588, -0.5_342, -0.2_331] )
elif model_name == "focalnet-base":
snake_case_ : List[Any] = torch.tensor([-0.1_655, -0.4_090, -0.1_730] )
elif model_name == "focalnet-base-lrf":
snake_case_ : List[str] = torch.tensor([0.5_306, -0.0_483, -0.3_928] )
assert torch.allclose(outputs.logits[0, :3] ,__magic_name__ ,atol=1E-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__magic_name__ )
processor.save_pretrained(__magic_name__ )
if push_to_hub:
print(F'''Pushing model and processor of {model_name} to the hub...''' )
model.push_to_hub(F'''{model_name}''' )
processor.push_to_hub(F'''{model_name}''' )
if __name__ == "__main__":
__lowerCamelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''focalnet-tiny''',
type=str,
help='''Name of the FocalNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub.''',
)
__lowerCamelCase : List[Any] = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
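
# Example invocation (hypothetical script path and output directory; valid model names
# are the keys of the checkpoint-URL mapping above):
#
#   python convert_focalnet_to_hf_format.py \
#       --model_name focalnet-tiny \
#       --pytorch_dump_folder_path ./focalnet-tiny-converted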
| 656 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
__lowerCamelCase : Dict = {
'''configuration_gpt_neox_japanese''': ['''GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXJapaneseConfig'''],
'''tokenization_gpt_neox_japanese''': ['''GPTNeoXJapaneseTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : int = [
'''GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXJapaneseForCausalLM''',
'''GPTNeoXJapaneseLayer''',
'''GPTNeoXJapaneseModel''',
'''GPTNeoXJapanesePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
__lowerCamelCase : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
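
# How the lazy module behaves in practice (a sketch): importing the package itself is
# cheap, and the first attribute access is what triggers the real submodule import.
#
#   from transformers.models.gpt_neox_japanese import GPTNeoXJapaneseConfig  # resolves lazily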
| 656 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
__lowerCamelCase : int = logging.get_logger(__name__)
__lowerCamelCase : Optional[int] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__lowerCamelCase : int = {
'''vocab_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/vocab.txt''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/vocab.txt''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'''
),
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'''
),
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt''',
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'''
),
'''bert-base-multilingual-cased''': (
'''https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-cased''': (
'''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'''
),
},
}
__lowerCamelCase : List[str] = {
'''bert-base-uncased''': 512,
'''bert-large-uncased''': 512,
'''bert-base-cased''': 512,
'''bert-large-cased''': 512,
'''bert-base-multilingual-uncased''': 512,
'''bert-base-multilingual-cased''': 512,
'''bert-base-chinese''': 512,
'''bert-base-german-cased''': 512,
'''bert-large-uncased-whole-word-masking''': 512,
'''bert-large-cased-whole-word-masking''': 512,
'''bert-large-uncased-whole-word-masking-finetuned-squad''': 512,
'''bert-large-cased-whole-word-masking-finetuned-squad''': 512,
'''bert-base-cased-finetuned-mrpc''': 512,
'''bert-base-german-dbmdz-cased''': 512,
'''bert-base-german-dbmdz-uncased''': 512,
'''TurkuNLP/bert-base-finnish-cased-v1''': 512,
'''TurkuNLP/bert-base-finnish-uncased-v1''': 512,
'''wietsedv/bert-base-dutch-cased''': 512,
}
__lowerCamelCase : Optional[int] = {
'''bert-base-uncased''': {'''do_lower_case''': True},
'''bert-large-uncased''': {'''do_lower_case''': True},
'''bert-base-cased''': {'''do_lower_case''': False},
'''bert-large-cased''': {'''do_lower_case''': False},
'''bert-base-multilingual-uncased''': {'''do_lower_case''': True},
'''bert-base-multilingual-cased''': {'''do_lower_case''': False},
'''bert-base-chinese''': {'''do_lower_case''': False},
'''bert-base-german-cased''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': False},
'''bert-base-cased-finetuned-mrpc''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-cased''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-uncased''': {'''do_lower_case''': True},
'''TurkuNLP/bert-base-finnish-cased-v1''': {'''do_lower_case''': False},
'''TurkuNLP/bert-base-finnish-uncased-v1''': {'''do_lower_case''': True},
'''wietsedv/bert-base-dutch-cased''': {'''do_lower_case''': False},
}
class A_ (a_ ):
"""simple docstring"""
a__ = VOCAB_FILES_NAMES
a__ = PRETRAINED_VOCAB_FILES_MAP
a__ = PRETRAINED_INIT_CONFIGURATION
a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ = BertTokenizer
def __init__( self :int , lowerCAmelCase__ :List[str]=None , lowerCAmelCase__ :Dict=None , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :List[str]="[UNK]" , lowerCAmelCase__ :List[str]="[SEP]" , lowerCAmelCase__ :int="[PAD]" , lowerCAmelCase__ :Optional[int]="[CLS]" , lowerCAmelCase__ :List[str]="[MASK]" , lowerCAmelCase__ :Dict=True , lowerCAmelCase__ :Any=None , **lowerCAmelCase__ :List[Any] , ) -> List[Any]:
'''simple docstring'''
super().__init__(
lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , tokenize_chinese_chars=lowerCAmelCase__ , strip_accents=lowerCAmelCase__ , **lowerCAmelCase__ , )
snake_case_ : Any = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , lowerCAmelCase__ ) != do_lower_case
or normalizer_state.get("strip_accents" , lowerCAmelCase__ ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , lowerCAmelCase__ ) != tokenize_chinese_chars
):
snake_case_ : Dict = getattr(lowerCAmelCase__ , normalizer_state.pop("type" ) )
snake_case_ : int = do_lower_case
snake_case_ : Tuple = strip_accents
snake_case_ : int = tokenize_chinese_chars
snake_case_ : Dict = normalizer_class(**lowerCAmelCase__ )
snake_case_ : Dict = do_lower_case
def _A ( self :List[Any] , lowerCAmelCase__ :Any , lowerCAmelCase__ :List[str]=None ) -> List[Any]:
'''simple docstring'''
snake_case_ : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _A ( self :List[Any] , lowerCAmelCase__ :List[int] , lowerCAmelCase__ :Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
snake_case_ : Optional[Any] = [self.sep_token_id]
snake_case_ : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _A ( self :Tuple , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
snake_case_ : Optional[Any] = self._tokenizer.model.save(lowerCAmelCase__ , name=lowerCAmelCase__ )
return tuple(lowerCAmelCase__ )
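

# A short usage sketch against the upstream class this file mirrors (BertTokenizerFast);
# the annotated layout follows the two helper methods above, and the exact token ids
# are checkpoint-dependent:
#
#   from transformers import BertTokenizerFast
#
#   tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
#   encoded = tokenizer("first segment", "second segment")
#   # input_ids layout:  [CLS] first segment [SEP] second segment [SEP]
#   # token_type_ids:    0 for the first segment (incl. [CLS]/[SEP]), 1 for the second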
| 656 |
'''simple docstring'''
def hexagonal_numbers(length: int) -> list[int]:
    """Return the first `length` hexagonal numbers h(n) = n * (2n - 1), starting at n = 0."""
    if length <= 0 or not isinstance(length, int):
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]


if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
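
# Worked check against the closed form h(n) = n * (2n - 1):
#   hexagonal_numbers(5) -> [0, 1, 6, 15, 28]
#   e.g. n = 4: 4 * (2 * 4 - 1) = 4 * 7 = 28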
| 656 | 1 |
'''simple docstring'''
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class A_ (a_ ):
"""simple docstring"""
a__ = (DDIMParallelScheduler,)
a__ = (('''eta''', 0.0), ('''num_inference_steps''', 50))
def _A ( self :Optional[Any] , **lowerCAmelCase__ :Dict ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Tuple = {
"num_train_timesteps": 1_000,
"beta_start": 0.0_0_0_1,
"beta_end": 0.0_2,
"beta_schedule": "linear",
"clip_sample": True,
}
config.update(**lowerCAmelCase__ )
return config
def _A ( self :Any , **lowerCAmelCase__ :Optional[int] ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Optional[int] = self.scheduler_classes[0]
snake_case_ : Tuple = self.get_scheduler_config(**lowerCAmelCase__ )
snake_case_ : Optional[Any] = scheduler_class(**lowerCAmelCase__ )
snake_case_, snake_case_ : Union[str, Any] = 10, 0.0
snake_case_ : Any = self.dummy_model()
snake_case_ : Optional[Any] = self.dummy_sample_deter
scheduler.set_timesteps(lowerCAmelCase__ )
for t in scheduler.timesteps:
snake_case_ : Any = model(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : Tuple = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ).prev_sample
return sample
def _A ( self :Dict ) -> Optional[int]:
'''simple docstring'''
for timesteps in [100, 500, 1_000]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase__ )
def _A ( self :List[str] ) -> Optional[int]:
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowerCAmelCase__ )
snake_case_ : Union[str, Any] = self.scheduler_classes[0]
snake_case_ : str = self.get_scheduler_config(steps_offset=1 )
snake_case_ : Tuple = scheduler_class(**lowerCAmelCase__ )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) )
def _A ( self :Optional[Any] ) -> Tuple:
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=lowerCAmelCase__ , beta_end=lowerCAmelCase__ )
def _A ( self :Any ) -> Tuple:
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowerCAmelCase__ )
def _A ( self :List[Any] ) -> Optional[int]:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase__ )
def _A ( self :Optional[Any] ) -> int:
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowerCAmelCase__ )
def _A ( self :Union[str, Any] ) -> Tuple:
'''simple docstring'''
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=lowerCAmelCase__ )
def _A ( self :Optional[int] ) -> int:
'''simple docstring'''
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=lowerCAmelCase__ )
def _A ( self :Optional[Any] ) -> int:
'''simple docstring'''
self.check_over_configs(thresholding=lowerCAmelCase__ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=lowerCAmelCase__ , prediction_type=lowerCAmelCase__ , sample_max_value=lowerCAmelCase__ , )
def _A ( self :List[str] ) -> Dict:
'''simple docstring'''
for t in [1, 10, 49]:
self.check_over_forward(time_step=lowerCAmelCase__ )
def _A ( self :Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ):
self.check_over_forward(time_step=lowerCAmelCase__ , num_inference_steps=lowerCAmelCase__ )
def _A ( self :int ) -> Union[str, Any]:
'''simple docstring'''
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=lowerCAmelCase__ , eta=lowerCAmelCase__ )
def _A ( self :List[Any] ) -> Tuple:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.scheduler_classes[0]
snake_case_ : Any = self.get_scheduler_config()
snake_case_ : str = scheduler_class(**lowerCAmelCase__ )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.1_4_7_7_1 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.3_2_4_6_0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.0_0_9_7_9 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.0_2 ) ) < 1E-5
def _A ( self :List[str] ) -> Tuple:
'''simple docstring'''
snake_case_ : str = self.scheduler_classes[0]
snake_case_ : Optional[Any] = self.get_scheduler_config()
snake_case_ : Tuple = scheduler_class(**lowerCAmelCase__ )
snake_case_, snake_case_ : Any = 10, 0.0
scheduler.set_timesteps(lowerCAmelCase__ )
snake_case_ : Optional[int] = self.dummy_model()
snake_case_ : Tuple = self.dummy_sample_deter
snake_case_ : str = self.dummy_sample_deter + 0.1
snake_case_ : Optional[Any] = self.dummy_sample_deter - 0.1
snake_case_ : int = samplea.shape[0]
snake_case_ : List[str] = torch.stack([samplea, samplea, samplea] , dim=0 )
snake_case_ : int = torch.arange(lowerCAmelCase__ )[0:3, None].repeat(1 , lowerCAmelCase__ )
snake_case_ : Union[str, Any] = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
snake_case_ : List[Any] = scheduler.batch_step_no_noise(lowerCAmelCase__ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , lowerCAmelCase__ )
snake_case_ : Any = torch.sum(torch.abs(lowerCAmelCase__ ) )
snake_case_ : List[Any] = torch.mean(torch.abs(lowerCAmelCase__ ) )
assert abs(result_sum.item() - 1_1_4_7.7_9_0_4 ) < 1E-2
assert abs(result_mean.item() - 0.4_9_8_2 ) < 1E-3
def _A ( self :Tuple ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Tuple = self.full_loop()
snake_case_ : Optional[Any] = torch.sum(torch.abs(lowerCAmelCase__ ) )
snake_case_ : List[str] = torch.mean(torch.abs(lowerCAmelCase__ ) )
assert abs(result_sum.item() - 1_7_2.0_0_6_7 ) < 1E-2
assert abs(result_mean.item() - 0.2_2_3_9_6_7 ) < 1E-3
def _A ( self :int ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Dict = self.full_loop(prediction_type="v_prediction" )
snake_case_ : str = torch.sum(torch.abs(lowerCAmelCase__ ) )
snake_case_ : List[str] = torch.mean(torch.abs(lowerCAmelCase__ ) )
assert abs(result_sum.item() - 5_2.5_3_0_2 ) < 1E-2
assert abs(result_mean.item() - 0.0_6_8_4 ) < 1E-3
def _A ( self :Optional[int] ) -> str:
'''simple docstring'''
snake_case_ : List[Any] = self.full_loop(set_alpha_to_one=lowerCAmelCase__ , beta_start=0.0_1 )
snake_case_ : Union[str, Any] = torch.sum(torch.abs(lowerCAmelCase__ ) )
snake_case_ : Any = torch.mean(torch.abs(lowerCAmelCase__ ) )
assert abs(result_sum.item() - 1_4_9.8_2_9_5 ) < 1E-2
assert abs(result_mean.item() - 0.1_9_5_1 ) < 1E-3
def _A ( self :Dict ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[Any] = self.full_loop(set_alpha_to_one=lowerCAmelCase__ , beta_start=0.0_1 )
snake_case_ : Tuple = torch.sum(torch.abs(lowerCAmelCase__ ) )
snake_case_ : Tuple = torch.mean(torch.abs(lowerCAmelCase__ ) )
assert abs(result_sum.item() - 1_4_9.0_7_8_4 ) < 1E-2
assert abs(result_mean.item() - 0.1_9_4_1 ) < 1E-3
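
# Comment-only sketch of the denoising loop the tests above drive ("model" stands in
# for any noise-prediction network taking (sample, timestep)); shapes are hypothetical:
#
#   scheduler = DDIMParallelScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(50)
#   sample = torch.randn(1, 3, 32, 32)
#   for t in scheduler.timesteps:
#       noise_pred = model(sample, t)
#       sample = scheduler.step(noise_pred, t, sample, eta=0.0).prev_sample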
| 656 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)


if __name__ == "__main__":
    main()
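
# Typical invocations of the subcommand defined above (the config path is hypothetical):
#
#   accelerate test
#   accelerate test --config_file /path/to/default_config.yaml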
| 656 | 1 |
'''simple docstring'''
import os
def solution() -> int:
    """Find the maximum top-to-bottom path sum through the triangle in triangle.txt."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")

    with open(triangle_path) as f:
        triangle = f.readlines()

    # parse each line into a row of ints
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)

    # accumulate downwards: each cell adds the larger of its two parents
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])


if __name__ == "__main__":
    print(solution())
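
# Worked mini-example of the in-place accumulation above on a 3-row triangle
# (hypothetical values, not the contents of triangle.txt):
#
#   [3]          row 0 stays [3]
#   [7, 4]       row 1 -> [7+3, 4+3] = [10, 7]
#   [2, 4, 6]    row 2 -> [2+10, 4+max(10, 7), 6+7] = [12, 14, 13]
#   max of last row = 14  (best top-to-bottom path: 3 -> 7 -> 4)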
| 656 |
'''simple docstring'''
from scipy.stats import spearmanr
import datasets
__lowerCamelCase : str = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
__lowerCamelCase : int = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
    p-value (`float`): p-value. **Note**: only returned if `return_pvalue=True` is passed.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
__lowerCamelCase : List[str] = R'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ (datasets.Metric ):
"""simple docstring"""
def _A ( self :str ) -> Union[str, Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
} ) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"] , )
def _A ( self :Optional[int] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[Any]=False ) -> List[Any]:
'''simple docstring'''
snake_case_ : Optional[Any] = spearmanr(lowerCAmelCase__ , lowerCAmelCase__ )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 656 | 1 |
'''simple docstring'''
from collections import deque
from math import floor
from random import random
from time import time
class A_ :
"""simple docstring"""
def __init__( self :Dict ) -> List[str]:
'''simple docstring'''
snake_case_ : int = {}
def _A ( self :Any , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Optional[Any]=1 ) -> Any:
'''simple docstring'''
if self.graph.get(lowerCAmelCase__ ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
snake_case_ : Optional[int] = [[w, v]]
if not self.graph.get(lowerCAmelCase__ ):
snake_case_ : Dict = []
def _A ( self :List[Any] ) -> Optional[int]:
'''simple docstring'''
return list(self.graph )
def _A ( self :str , lowerCAmelCase__ :Any , lowerCAmelCase__ :int ) -> List[Any]:
'''simple docstring'''
if self.graph.get(lowerCAmelCase__ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowerCAmelCase__ )
def _A ( self :List[str] , lowerCAmelCase__ :Optional[Any]=-2 , lowerCAmelCase__ :str=-1 ) -> str:
'''simple docstring'''
if s == d:
return []
snake_case_ : str = []
snake_case_ : Optional[int] = []
if s == -2:
snake_case_ : List[Any] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Dict = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : str = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowerCAmelCase__ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Union[str, Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Optional[Any] = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return visited
def _A ( self :Tuple , lowerCAmelCase__ :int=-1 ) -> int:
'''simple docstring'''
if c == -1:
snake_case_ : Any = floor(random() * 10_000 ) + 10
for i in range(lowerCAmelCase__ ):
            # every vertex gets a random number of edges (between 1 and 102)
for _ in range(floor(random() * 102 ) + 1 ):
snake_case_ : Optional[Any] = floor(random() * c ) + 1
if n != i:
self.add_pair(lowerCAmelCase__ , lowerCAmelCase__ , 1 )
def _A ( self :Tuple , lowerCAmelCase__ :Dict=-2 ) -> Dict:
'''simple docstring'''
snake_case_ : Union[str, Any] = deque()
snake_case_ : Optional[Any] = []
if s == -2:
snake_case_ : Tuple = list(self.graph )[0]
d.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
while d:
snake_case_ : Optional[int] = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _A ( self :List[str] , lowerCAmelCase__ :str ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Tuple = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def _A ( self :Any , lowerCAmelCase__ :int ) -> Optional[Any]:
'''simple docstring'''
return len(self.graph[u] )
def _A ( self :Tuple , lowerCAmelCase__ :List[str]=-2 ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : str = []
snake_case_ : str = []
if s == -2:
snake_case_ : Optional[Any] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : int = s
snake_case_ : Optional[int] = []
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : List[Any] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : List[str] = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(lowerCAmelCase__ ) != 0:
snake_case_ : int = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Union[str, Any] = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return sorted_nodes
def _A ( self :Dict ) -> Any:
'''simple docstring'''
snake_case_ : Dict = []
snake_case_ : Any = []
snake_case_ : str = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Optional[int] = -2
snake_case_ : Any = []
snake_case_ : List[Any] = s
snake_case_ : int = False
snake_case_ : Optional[int] = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : List[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case_ : Any = len(lowerCAmelCase__ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : Optional[int] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ : Optional[Any] = True
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Optional[Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : str = False
indirect_parents.append(lowerCAmelCase__ )
snake_case_ : List[str] = s
snake_case_ : Optional[int] = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return list(lowerCAmelCase__ )
def _A ( self :Tuple ) -> List[str]:
'''simple docstring'''
snake_case_ : List[Any] = []
snake_case_ : Tuple = []
snake_case_ : List[str] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : str = -2
snake_case_ : List[str] = []
snake_case_ : List[Any] = s
snake_case_ : List[str] = False
snake_case_ : Dict = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : List[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case_ : Any = len(lowerCAmelCase__ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ : Tuple = True
if len(lowerCAmelCase__ ) != 0:
snake_case_ : List[Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Optional[int] = False
indirect_parents.append(lowerCAmelCase__ )
snake_case_ : int = s
snake_case_ : Union[str, Any] = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return False
def _A ( self :Optional[int] , lowerCAmelCase__ :Optional[int]=-2 , lowerCAmelCase__ :Tuple=-1 ) -> str:
'''simple docstring'''
snake_case_ : Optional[int] = time()
self.dfs(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : Optional[Any] = time()
return end - begin
def _A ( self :Any , lowerCAmelCase__ :Tuple=-2 ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Any = time()
self.bfs(lowerCAmelCase__ )
snake_case_ : Any = time()
return end - begin
class A_ :
"""simple docstring"""
def __init__( self :Tuple ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[Any] = {}
def _A ( self :str , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Union[str, Any]=1 ) -> str:
'''simple docstring'''
        if self.graph.get(lowerCAmelCase__ ):
            # if there already is an edge
            if self.graph[u].count([w, v] ) == 0:
                self.graph[u].append([w, v] )
        else:
            # if u does not exist
            snake_case_ : str = [[w, v]]
        # add the other way
        if self.graph.get(lowerCAmelCase__ ):
            # if there already is an edge
            if self.graph[v].count([w, u] ) == 0:
                self.graph[v].append([w, u] )
        else:
            # if v does not exist
            snake_case_ : List[str] = [[w, u]]
def _A ( self :Dict , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
if self.graph.get(lowerCAmelCase__ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowerCAmelCase__ )
# the other way round
if self.graph.get(lowerCAmelCase__ ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(lowerCAmelCase__ )
def _A ( self :Optional[Any] , lowerCAmelCase__ :Optional[Any]=-2 , lowerCAmelCase__ :Optional[int]=-1 ) -> int:
'''simple docstring'''
if s == d:
return []
snake_case_ : Any = []
snake_case_ : Dict = []
if s == -2:
snake_case_ : Optional[int] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Tuple = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : List[str] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowerCAmelCase__ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Optional[Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : str = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return visited
def _A ( self :Optional[int] , lowerCAmelCase__ :str=-1 ) -> List[Any]:
'''simple docstring'''
if c == -1:
snake_case_ : Optional[int] = floor(random() * 10_000 ) + 10
for i in range(lowerCAmelCase__ ):
            # every vertex gets a random number of edges (between 1 and 102)
for _ in range(floor(random() * 102 ) + 1 ):
snake_case_ : str = floor(random() * c ) + 1
if n != i:
self.add_pair(lowerCAmelCase__ , lowerCAmelCase__ , 1 )
def _A ( self :Any , lowerCAmelCase__ :Optional[Any]=-2 ) -> List[Any]:
'''simple docstring'''
snake_case_ : List[str] = deque()
snake_case_ : Optional[Any] = []
if s == -2:
snake_case_ : List[Any] = list(self.graph )[0]
d.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
while d:
snake_case_ : Optional[int] = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _A ( self :str , lowerCAmelCase__ :Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
return len(self.graph[u] )
def _A ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
snake_case_ : Any = []
snake_case_ : Optional[Any] = []
snake_case_ : Optional[Any] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Tuple = -2
snake_case_ : Optional[int] = []
snake_case_ : Tuple = s
snake_case_ : Optional[Any] = False
snake_case_ : Optional[int] = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : Optional[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case_ : Optional[int] = len(lowerCAmelCase__ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : Tuple = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ : Optional[int] = True
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Optional[Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Optional[int] = False
indirect_parents.append(lowerCAmelCase__ )
snake_case_ : List[Any] = s
snake_case_ : Dict = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return list(lowerCAmelCase__ )
def _A ( self :Optional[Any] ) -> Tuple:
'''simple docstring'''
snake_case_ : Optional[Any] = []
snake_case_ : int = []
snake_case_ : List[str] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Tuple = -2
snake_case_ : int = []
snake_case_ : int = s
snake_case_ : Optional[Any] = False
snake_case_ : List[Any] = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : Union[str, Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case_ : Tuple = len(lowerCAmelCase__ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : Optional[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ : Optional[Any] = True
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Tuple = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Optional[int] = False
indirect_parents.append(lowerCAmelCase__ )
snake_case_ : Union[str, Any] = s
snake_case_ : Tuple = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return False
def _A ( self :Any ) -> Tuple:
'''simple docstring'''
return list(self.graph )
def _A ( self :Optional[Any] , lowerCAmelCase__ :Tuple=-2 , lowerCAmelCase__ :Optional[int]=-1 ) -> str:
'''simple docstring'''
snake_case_ : List[str] = time()
self.dfs(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : List[Any] = time()
return end - begin
def _A ( self :Union[str, Any] , lowerCAmelCase__ :List[Any]=-2 ) -> int:
'''simple docstring'''
snake_case_ : List[str] = time()
self.bfs(lowerCAmelCase__ )
snake_case_ : Tuple = time()
return end - begin
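# A compact, self-contained restatement of the stack-based DFS the class above
# uses, with descriptive names; `adjacency` is a stand-in for self.graph and is
# assumed to map node -> [[weight, neighbor], ...]. Illustrative only.
def _dfs_sketch(adjacency, start):
    stack, visited = [start], [start]
    while stack:
        node = stack[-1]
        unvisited = [nbr for _, nbr in adjacency.get(node, [] ) if nbr not in visited]
        if unvisited:
            # descend into the first unvisited neighbor
            stack.append(unvisited[0] )
            visited.append(unvisited[0] )
        else:
            # all children visited: backtrack
            stack.pop()
    return visited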
| 656 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
__lowerCamelCase : Optional[Any] = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
__lowerCamelCase : str = 128022
__lowerCamelCase : List[Any] = 128028
@require_sentencepiece
class A_ (a_ , unittest.TestCase ):
"""simple docstring"""
a__ = MaMaaaTokenizer
a__ = False
a__ = False
a__ = True
def _A ( self :Union[str, Any] ) -> List[str]:
'''simple docstring'''
super().setUp()
snake_case_ : int = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
snake_case_ : Any = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
snake_case_ : Optional[int] = Path(self.tmpdirname )
save_json(lowerCAmelCase__ , save_dir / VOCAB_FILES_NAMES["vocab_file"] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(lowerCAmelCase__ , save_dir / VOCAB_FILES_NAMES["spm_file"] )
snake_case_ : Union[str, Any] = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def _A ( self :List[Any] , **lowerCAmelCase__ :List[Any] ) -> str:
'''simple docstring'''
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def _A ( self :Optional[int] , lowerCAmelCase__ :Any ) -> Optional[int]:
'''simple docstring'''
return (
"This is a test",
"This is a test",
)
def _A ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : str = "</s>"
snake_case_ : Union[str, Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__ ) , lowerCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__ ) , lowerCAmelCase__ )
def _A ( self :Union[str, Any] ) -> List[str]:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.get_tokenizer()
snake_case_ : Any = list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "</s>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "<s>" )
self.assertEqual(len(lowerCAmelCase__ ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip("Skip this test while all models are still to be uploaded." )
def _A ( self :List[Any] ) -> Union[str, Any]:
'''simple docstring'''
pass
def _A ( self :Optional[int] ) -> int:
'''simple docstring'''
snake_case_ : int = self.get_tokenizer()
snake_case_ : List[str] = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowerCAmelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [2, 3, 4, 5, 6] , )
snake_case_ : Any = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(lowerCAmelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
snake_case_ : Any = tokenizer.convert_tokens_to_string(lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , "This is a test" )
@slow
def _A ( self :Any ) -> List[Any]:
'''simple docstring'''
snake_case_ : int = {"input_ids": [[128_022, 110_108, 397, 11, 38_272, 2_247, 124_811, 285, 18_105, 1_586, 207, 7, 39_534, 4_428, 397, 1_019, 18_105, 1_586, 207, 7, 41_337, 16_786, 241, 7, 20_214, 17, 125_690, 10_398, 7, 44_378, 58_069, 68_342, 7_798, 7_343, 11, 299, 33_310, 4, 158, 37_350, 94_077, 4_569, 299, 33_310, 90, 4, 52_840, 290, 4, 31_270, 112, 299, 682, 4, 52_840, 39_953, 14_079, 193, 52_519, 90_894, 17_894, 120_697, 11, 40_445, 551, 17, 1_019, 52_519, 90_894, 17_756, 963, 11, 40_445, 480, 17, 9_792, 1_120, 5_173, 1_393, 6_240, 16_786, 241, 120_996, 28, 1_245, 1_393, 118_240, 11_123, 1_019, 93_612, 2_691, 10_618, 98_058, 120_409, 1_928, 279, 4, 40_683, 367, 178, 207, 1_019, 103, 103_121, 506, 65_296, 5, 2], [128_022, 21_217, 367, 117, 125_450, 128, 719, 7, 7_308, 40, 93_612, 12_669, 1_116, 16_704, 71, 17_785, 3_699, 15_592, 35, 144, 9_584, 241, 11_943, 713, 950, 799, 2_247, 88_427, 150, 149, 118_813, 120_706, 1_019, 106_906, 81_518, 28, 1_224, 22_799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128_022, 1_658, 123_311, 5_155, 5_578, 4_722, 279, 14_947, 2_366, 1_120, 1_197, 14, 1_348, 9_232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__ , model_name="facebook/m2m100_418M" , revision="c168bae485c864188cf9aa0e4108b0b6934dc91e" , )
@require_torch
@require_sentencepiece
@require_tokenizers
class A_ (unittest.TestCase ):
"""simple docstring"""
a__ = '''facebook/m2m100_418M'''
a__ = [
'''In my opinion, there are two levels of response from the French government.''',
'''NSA Affair Emphasizes Complete Lack of Debate on Intelligence''',
]
a__ = [
'''Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.''',
'''L\'affaire NSA souligne l\'absence totale de débat sur le renseignement''',
]
# fmt: off
a__ = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
@classmethod
def _A ( cls :str ) -> int:
'''simple docstring'''
snake_case_ : MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="en" , tgt_lang="fr" )
snake_case_ : List[str] = 1
return cls
def _A ( self :Tuple ) -> Union[str, Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.get_lang_id("ar" ) , 128_006 )
self.assertEqual(self.tokenizer.get_lang_id("en" ) , 128_022 )
self.assertEqual(self.tokenizer.get_lang_id("ro" ) , 128_076 )
self.assertEqual(self.tokenizer.get_lang_id("mr" ) , 128_063 )
def _A ( self :Optional[int] ) -> List[str]:
'''simple docstring'''
snake_case_ : Dict = self.tokenizer.get_vocab()
self.assertEqual(len(lowerCAmelCase__ ) , self.tokenizer.vocab_size )
self.assertEqual(vocab["<unk>"] , 3 )
self.assertIn(self.tokenizer.get_lang_token("en" ) , lowerCAmelCase__ )
def _A ( self :Any ) -> Dict:
'''simple docstring'''
snake_case_ : List[str] = "en"
snake_case_ : Dict = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase__ )
def _A ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
self.assertIn(lowerCAmelCase__ , self.tokenizer.all_special_ids )
# fmt: off
snake_case_ : Dict = [FR_CODE, 5_364, 82, 8_642, 4, 294, 47, 8, 14_028, 136, 3_286, 9_706, 6, 90_797, 6, 144_012, 162, 88_128, 30_061, 5, 2]
# fmt: on
snake_case_ : List[str] = self.tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
snake_case_ : str = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertNotIn(self.tokenizer.eos_token , lowerCAmelCase__ )
def _A ( self :Tuple ) -> Tuple:
'''simple docstring'''
snake_case_ : Union[str, Any] = tempfile.mkdtemp()
snake_case_ : int = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(lowerCAmelCase__ )
snake_case_ : List[str] = MaMaaaTokenizer.from_pretrained(lowerCAmelCase__ )
self.assertDictEqual(new_tok.lang_token_to_id , lowerCAmelCase__ )
@require_torch
def _A ( self :Optional[Any] ) -> str:
'''simple docstring'''
snake_case_ : Union[str, Any] = "en"
snake_case_ : Tuple = "fr"
snake_case_ : Optional[int] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase__ , return_tensors="pt" )
snake_case_ : Dict = shift_tokens_right(
batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
for k in batch:
snake_case_ : str = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def _A ( self :Optional[Any] ) -> Tuple:
'''simple docstring'''
snake_case_ : List[str] = "mr"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
snake_case_ : int = "zh"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
def _A ( self :str ) -> int:
'''simple docstring'''
snake_case_ : Dict = "mr"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
snake_case_ : Tuple = "zh"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def _A ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Optional[int] = self.tokenizer._build_translation_inputs("A test" , return_tensors="pt" , src_lang="en" , tgt_lang="ar" )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
# en_XX, A, test, EOS
"input_ids": [[128_022, 58, 4_183, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 128_006,
} , )
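# For context, a minimal sketch of the shift_tokens_right behaviour the tests
# above rely on (assumed typical seq2seq semantics; names are descriptive, not
# the library's): labels are shifted one position to the right, the decoder
# start token is prepended, and -100 ignore-index entries become the pad id.
import torch
def _shift_right_sketch(labels: torch.Tensor , pad_id: int , start_id: int ) -> torch.Tensor:
    shifted = labels.new_zeros(labels.shape )
    shifted[:, 1:] = labels[:, :-1].clone()
    shifted[:, 0] = start_id
    shifted.masked_fill_(shifted == -100 , pad_id )
    return shifted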
| 656 | 1 |
'''simple docstring'''
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
__lowerCamelCase : str = {
'''sample_size''': 32,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': 1000,
'''block_out_channels''': [32, 64],
'''attention_head_dim''': 8,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
__lowerCamelCase : int = {
'''sample_size''': 64,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 3,
'''num_class_embeds''': 1000,
'''block_out_channels''': [192, 192 * 2, 192 * 3, 192 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
__lowerCamelCase : Tuple = {
'''sample_size''': 256,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': None,
'''block_out_channels''': [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''default''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
__lowerCamelCase : Dict = {
'''num_train_timesteps''': 40,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
__lowerCamelCase : Optional[int] = {
'''num_train_timesteps''': 201,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
__lowerCamelCase : Optional[int] = {
'''num_train_timesteps''': 151,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
def __UpperCAmelCase ( __magic_name__ )-> Optional[Any]:
"""simple docstring"""
if isinstance(__magic_name__ ,__magic_name__ ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("boolean value expected" )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__=False )-> str:
"""simple docstring"""
snake_case_ : Optional[int] = checkpoint[F'''{old_prefix}.in_layers.0.weight''']
snake_case_ : Dict = checkpoint[F'''{old_prefix}.in_layers.0.bias''']
snake_case_ : List[Any] = checkpoint[F'''{old_prefix}.in_layers.2.weight''']
snake_case_ : Tuple = checkpoint[F'''{old_prefix}.in_layers.2.bias''']
snake_case_ : Optional[Any] = checkpoint[F'''{old_prefix}.emb_layers.1.weight''']
snake_case_ : Dict = checkpoint[F'''{old_prefix}.emb_layers.1.bias''']
snake_case_ : List[str] = checkpoint[F'''{old_prefix}.out_layers.0.weight''']
snake_case_ : int = checkpoint[F'''{old_prefix}.out_layers.0.bias''']
snake_case_ : Any = checkpoint[F'''{old_prefix}.out_layers.3.weight''']
snake_case_ : int = checkpoint[F'''{old_prefix}.out_layers.3.bias''']
if has_skip:
snake_case_ : List[str] = checkpoint[F'''{old_prefix}.skip_connection.weight''']
snake_case_ : str = checkpoint[F'''{old_prefix}.skip_connection.bias''']
return new_checkpoint
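# The tensors read above typically populate the diffusers ResnetBlock2D
# parameters norm1, conv1, time_emb_proj, norm2 and conv2 (plus conv_shortcut
# when has_skip is set), keyed under f"{new_prefix}."; treat those exact target
# names as an assumption rather than something this script guarantees.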
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__=None )-> Union[str, Any]:
"""simple docstring"""
snake_case_, snake_case_, snake_case_ : Any = checkpoint[F'''{old_prefix}.qkv.weight'''].chunk(3 ,dim=0 )
snake_case_, snake_case_, snake_case_ : Optional[int] = checkpoint[F'''{old_prefix}.qkv.bias'''].chunk(3 ,dim=0 )
snake_case_ : Optional[int] = checkpoint[F'''{old_prefix}.norm.weight''']
snake_case_ : str = checkpoint[F'''{old_prefix}.norm.bias''']
snake_case_ : int = weight_q.squeeze(-1 ).squeeze(-1 )
snake_case_ : Dict = bias_q.squeeze(-1 ).squeeze(-1 )
snake_case_ : Optional[int] = weight_k.squeeze(-1 ).squeeze(-1 )
snake_case_ : Any = bias_k.squeeze(-1 ).squeeze(-1 )
snake_case_ : Tuple = weight_v.squeeze(-1 ).squeeze(-1 )
snake_case_ : Union[str, Any] = bias_v.squeeze(-1 ).squeeze(-1 )
snake_case_ : Dict = (
checkpoint[F'''{old_prefix}.proj_out.weight'''].squeeze(-1 ).squeeze(-1 )
)
snake_case_ : Optional[Any] = checkpoint[F'''{old_prefix}.proj_out.bias'''].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> int:
"""simple docstring"""
snake_case_ : Optional[int] = torch.load(__magic_name__ ,map_location="cpu" )
snake_case_ : Tuple = {}
snake_case_ : Any = checkpoint["time_embed.0.weight"]
snake_case_ : Union[str, Any] = checkpoint["time_embed.0.bias"]
snake_case_ : Tuple = checkpoint["time_embed.2.weight"]
snake_case_ : Optional[Any] = checkpoint["time_embed.2.bias"]
if unet_config["num_class_embeds"] is not None:
snake_case_ : Union[str, Any] = checkpoint["label_emb.weight"]
snake_case_ : Optional[int] = checkpoint["input_blocks.0.0.weight"]
snake_case_ : Dict = checkpoint["input_blocks.0.0.bias"]
snake_case_ : Any = unet_config["down_block_types"]
snake_case_ : Union[str, Any] = unet_config["layers_per_block"]
snake_case_ : Optional[int] = unet_config["attention_head_dim"]
snake_case_ : List[str] = unet_config["block_out_channels"]
snake_case_ : Any = 1
snake_case_ : int = channels_list[0]
for i, layer_type in enumerate(__magic_name__ ):
snake_case_ : int = channels_list[i]
snake_case_ : Any = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(__magic_name__ ):
snake_case_ : str = F'''down_blocks.{i}.resnets.{j}'''
snake_case_ : Optional[int] = F'''input_blocks.{current_layer}.0'''
snake_case_ : List[str] = True if j == 0 and downsample_block_has_skip else False
snake_case_ : Optional[Any] = convert_resnet(__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ,has_skip=__magic_name__ )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(__magic_name__ ):
snake_case_ : Dict = F'''down_blocks.{i}.resnets.{j}'''
snake_case_ : List[Any] = F'''input_blocks.{current_layer}.0'''
snake_case_ : int = True if j == 0 and downsample_block_has_skip else False
snake_case_ : Dict = convert_resnet(__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ,has_skip=__magic_name__ )
snake_case_ : List[Any] = F'''down_blocks.{i}.attentions.{j}'''
snake_case_ : List[str] = F'''input_blocks.{current_layer}.1'''
snake_case_ : Dict = convert_attention(
__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ )
current_layer += 1
if i != len(__magic_name__ ) - 1:
snake_case_ : Optional[Any] = F'''down_blocks.{i}.downsamplers.0'''
snake_case_ : List[str] = F'''input_blocks.{current_layer}.0'''
snake_case_ : int = convert_resnet(__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ )
current_layer += 1
snake_case_ : Any = current_channels
# hardcoded the mid-block for now
snake_case_ : Optional[Any] = "mid_block.resnets.0"
snake_case_ : int = "middle_block.0"
snake_case_ : Optional[Any] = convert_resnet(__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ )
snake_case_ : int = "mid_block.attentions.0"
snake_case_ : Optional[int] = "middle_block.1"
snake_case_ : Dict = convert_attention(__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ )
snake_case_ : Dict = "mid_block.resnets.1"
snake_case_ : Tuple = "middle_block.2"
snake_case_ : Dict = convert_resnet(__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ )
snake_case_ : Tuple = 0
snake_case_ : Union[str, Any] = unet_config["up_block_types"]
for i, layer_type in enumerate(__magic_name__ ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
snake_case_ : Tuple = F'''up_blocks.{i}.resnets.{j}'''
snake_case_ : Any = F'''output_blocks.{current_layer}.0'''
snake_case_ : str = convert_resnet(__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ,has_skip=__magic_name__ )
current_layer += 1
if i != len(__magic_name__ ) - 1:
snake_case_ : Optional[int] = F'''up_blocks.{i}.upsamplers.0'''
snake_case_ : Any = F'''output_blocks.{current_layer-1}.1'''
snake_case_ : Tuple = convert_resnet(__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
snake_case_ : Tuple = F'''up_blocks.{i}.resnets.{j}'''
snake_case_ : int = F'''output_blocks.{current_layer}.0'''
snake_case_ : Optional[int] = convert_resnet(__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ,has_skip=__magic_name__ )
snake_case_ : Dict = F'''up_blocks.{i}.attentions.{j}'''
snake_case_ : Optional[int] = F'''output_blocks.{current_layer}.1'''
snake_case_ : Any = convert_attention(
__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ )
current_layer += 1
if i != len(__magic_name__ ) - 1:
snake_case_ : Optional[Any] = F'''up_blocks.{i}.upsamplers.0'''
snake_case_ : Optional[int] = F'''output_blocks.{current_layer-1}.2'''
snake_case_ : int = convert_resnet(__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ )
snake_case_ : List[str] = checkpoint["out.0.weight"]
snake_case_ : Optional[int] = checkpoint["out.0.bias"]
snake_case_ : Dict = checkpoint["out.2.weight"]
snake_case_ : List[str] = checkpoint["out.2.bias"]
return new_checkpoint
if __name__ == "__main__":
__lowerCamelCase : int = argparse.ArgumentParser()
parser.add_argument('''--unet_path''', default=None, type=str, required=True, help='''Path to the unet.pt to convert.''')
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output the converted UNet model.'''
)
parser.add_argument('''--class_cond''', default=True, type=str, help='''Whether the model is class-conditional.''')
__lowerCamelCase : str = parser.parse_args()
__lowerCamelCase : Dict = strabool(args.class_cond)
__lowerCamelCase : Dict = os.path.basename(args.unet_path)
print(f'''Checkpoint: {ckpt_name}''')
# Get U-Net config
if "imagenet64" in ckpt_name:
__lowerCamelCase : List[Any] = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
__lowerCamelCase : Union[str, Any] = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
__lowerCamelCase : List[Any] = TEST_UNET_CONFIG
else:
raise ValueError(f'''Checkpoint type {ckpt_name} is not currently supported.''')
if not args.class_cond:
__lowerCamelCase : List[str] = None
__lowerCamelCase : Any = con_pt_to_diffuser(args.unet_path, unet_config)
__lowerCamelCase : List[Any] = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
__lowerCamelCase : List[str] = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
__lowerCamelCase : Optional[Any] = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
__lowerCamelCase : Dict = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(f'''Checkpoint type {ckpt_name} is not currently supported.''')
__lowerCamelCase : Optional[int] = CMStochasticIterativeScheduler(**scheduler_config)
__lowerCamelCase : Dict = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
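    # Example invocation (file and directory names are placeholders):
    #   python convert_consistency_to_diffusers.py --unet_path cd_imagenet64_l2.pt \
    #       --dump_path ./converted-consistency-model --class_cond True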
| 656 |
'''simple docstring'''
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_tf_ops.py
__lowerCamelCase : str = '''.'''
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
__lowerCamelCase : Tuple = [
'''Assert''',
'''AssignVariableOp''',
'''EmptyTensorList''',
'''MergeV2Checkpoints''',
'''ReadVariableOp''',
'''ResourceGather''',
'''RestoreV2''',
'''SaveV2''',
'''ShardedFilename''',
'''StatefulPartitionedCall''',
'''StaticRegexFullMatch''',
'''VarHandleOp''',
]
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> List[str]:
"""simple docstring"""
snake_case_ : Tuple = SavedModel()
snake_case_ : Dict = []
with open(os.path.join(__magic_name__ ,"utils" ,"tf_ops" ,"onnx.json" ) ) as f:
snake_case_ : Dict = json.load(__magic_name__ )["opsets"]
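    # the opsets file is assumed to look like {"opsets": {"1": [...], "2": [...], ...}},
    # so every opset key up to `opset` contributes its newly supported ONNX op names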
for i in range(1 ,opset + 1 ):
onnx_ops.extend(onnx_opsets[str(__magic_name__ )] )
with open(__magic_name__ ,"rb" ) as f:
saved_model.ParseFromString(f.read() )
snake_case_ : Tuple = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node )
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def )
# Convert to list, sorted if you want
snake_case_ : str = sorted(__magic_name__ )
snake_case_ : Optional[int] = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(__magic_name__ )
if strict and len(__magic_name__ ) > 0:
        raise Exception(F'''Found the following incompatible ops for the opset {opset}:\n''' + "\n".join(incompatible_ops ) )
elif len(__magic_name__ ) > 0:
print(F'''Found the following incompatible ops for the opset {opset}:''' )
print(*__magic_name__ ,sep="\n" )
else:
print(F'''The saved model {saved_model_path} can properly be converted with ONNX.''' )
if __name__ == "__main__":
__lowerCamelCase : Any = argparse.ArgumentParser()
parser.add_argument('''--saved_model_path''', help='''Path of the saved model to check (the .pb file).''')
parser.add_argument(
'''--opset''', default=12, type=int, help='''The ONNX opset against which the model has to be tested.'''
)
parser.add_argument(
'''--framework''', choices=['''onnx'''], default='''onnx''', help='''Frameworks against which to test the saved model.'''
)
parser.add_argument(
'''--strict''', action='''store_true''', help='''Whether make the checking strict (raise errors) or not (raise warnings)'''
)
__lowerCamelCase : Dict = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
| 656 | 1 |
'''simple docstring'''
from __future__ import annotations
# This is the precision for this function, which can be altered.
# It is recommended that users keep this number greater than or equal to 10.
__lowerCamelCase : Dict = 10
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ )-> int:
"""simple docstring"""
    for i in range(__magic_name__ ,__magic_name__ + 1 ):  # the right bound is inclusive
if array[i] == target:
return i
return -1
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> int:
"""simple docstring"""
snake_case_ : Dict = 0
    snake_case_ : List[str] = len(__magic_name__ ) - 1  # inclusive right bound
while left <= right:
if right - left < precision:
return lin_search(__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ )
        # probe points one third and two thirds of the way through [left, right]
        snake_case_ : int = left + (right - left) // 3
        snake_case_ : List[Any] = right - (right - left) // 3
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
snake_case_ : Any = one_third - 1
elif array[two_third] < target:
snake_case_ : int = two_third + 1
else:
snake_case_ : Optional[Any] = one_third + 1
snake_case_ : str = two_third - 1
else:
return -1
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ )-> int:
"""simple docstring"""
    if left <= right:
if right - left < precision:
return lin_search(__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ )
        # probe points one third and two thirds of the way through [left, right]
        snake_case_ : Union[str, Any] = left + (right - left) // 3
        snake_case_ : str = right - (right - left) // 3
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
return rec_ternary_search(__magic_name__ ,one_third - 1 ,__magic_name__ ,__magic_name__ )
elif array[two_third] < target:
return rec_ternary_search(two_third + 1 ,__magic_name__ ,__magic_name__ ,__magic_name__ )
else:
return rec_ternary_search(one_third + 1 ,two_third - 1 ,__magic_name__ ,__magic_name__ )
else:
return -1
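# Worked example of the probe arithmetic (precision assumed to stay at 10):
# with left = 0 and right = 29, one_third = 0 + 29 // 3 = 9 and
# two_third = 29 - 29 // 3 = 20, splitting [0, 29] into three near-equal parts.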
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCamelCase : Optional[Any] = input('''Enter numbers separated by comma:\n''').strip()
__lowerCamelCase : Optional[int] = [int(item.strip()) for item in user_input.split(''',''')]
assert collection == sorted(collection), f"List must be ordered.\n{collection}."
__lowerCamelCase : List[Any] = int(input('''Enter the number to be found in the list:\n''').strip())
__lowerCamelCase : int = ite_ternary_search(collection, target)
__lowerCamelCase : Dict = rec_ternary_search(0, len(collection) - 1, collection, target)
if resulta != -1:
print(f'''Iterative search: {target} found at positions: {resulta}''')
print(f'''Recursive search: {target} found at positions: {resulta}''')
else:
print('''Not found''')
| 656 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
__lowerCamelCase : Optional[Any] = datasets.utils.logging.get_logger(__name__)
__lowerCamelCase : List[str] = ['''names''', '''prefix''']
__lowerCamelCase : int = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols''']
__lowerCamelCase : str = ['''encoding_errors''', '''on_bad_lines''']
__lowerCamelCase : Optional[Any] = ['''date_format''']
@dataclass
class A_ (datasets.BuilderConfig ):
"""simple docstring"""
a__ = ","
a__ = None
a__ = "infer"
a__ = None
a__ = None
a__ = None
a__ = None
a__ = None
a__ = True
a__ = None
a__ = None
a__ = None
a__ = None
a__ = False
a__ = None
a__ = None
a__ = None
a__ = True
a__ = True
a__ = False
a__ = True
a__ = None
a__ = "."
a__ = None
a__ = '"'
a__ = 0
a__ = None
a__ = None
a__ = None
a__ = None
a__ = True
a__ = True
a__ = 0
a__ = True
a__ = False
a__ = None
a__ = 10000
a__ = None
a__ = "strict"
a__ = "error"
a__ = None
def _A ( self :List[str] ) -> Any:
'''simple docstring'''
if self.delimiter is not None:
snake_case_ : Tuple = self.delimiter
if self.column_names is not None:
snake_case_ : List[Any] = self.column_names
@property
def _A ( self :Optional[Any] ) -> int:
'''simple docstring'''
snake_case_ : Optional[int] = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , lowerCAmelCase__ ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class A_ (datasets.ArrowBasedBuilder ):
"""simple docstring"""
a__ = CsvConfig
def _A ( self :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def _A ( self :Tuple , lowerCAmelCase__ :Dict ) -> List[Any]:
'''simple docstring'''
if not self.config.data_files:
raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
snake_case_ : Optional[Any] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(lowerCAmelCase__ , (str, list, tuple) ):
snake_case_ : int = data_files
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
snake_case_ : List[str] = [files]
snake_case_ : Tuple = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
snake_case_ : str = []
for split_name, files in data_files.items():
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
snake_case_ : str = [files]
snake_case_ : Any = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files]
splits.append(datasets.SplitGenerator(name=lowerCAmelCase__ , gen_kwargs={"files": files} ) )
return splits
def _A ( self :List[Any] , lowerCAmelCase__ :pa.Table ) -> pa.Table:
'''simple docstring'''
if self.config.features is not None:
snake_case_ : int = self.config.features.arrow_schema
if all(not require_storage_cast(lowerCAmelCase__ ) for feature in self.config.features.values() ):
# cheaper cast
snake_case_ : Optional[Any] = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=lowerCAmelCase__ )
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
snake_case_ : Dict = table_cast(lowerCAmelCase__ , lowerCAmelCase__ )
return pa_table
def _A ( self :Dict , lowerCAmelCase__ :Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Tuple = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
snake_case_ : str = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(lowerCAmelCase__ ) else object
for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(lowerCAmelCase__ ) ):
snake_case_ : Tuple = pd.read_csv(lowerCAmelCase__ , iterator=lowerCAmelCase__ , dtype=lowerCAmelCase__ , **self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(lowerCAmelCase__ ):
snake_case_ : Optional[int] = pa.Table.from_pandas(lowerCAmelCase__ )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(lowerCAmelCase__ )
except ValueError as e:
logger.error(F'''Failed to read file \'{file}\' with error {type(lowerCAmelCase__ )}: {e}''' )
raise
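# Minimal usage sketch: this builder backs the "csv" loading script, e.g.
# (file names are illustrative)
#   from datasets import load_dataset
#   ds = load_dataset("csv", data_files={"train": "train.csv"}, sep=";")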
| 656 | 1 |
'''simple docstring'''
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__lowerCamelCase : Any = '''src/transformers'''
# This is to make sure the transformers module imported is the one in the repo.
__lowerCamelCase : List[str] = direct_transformers_import(PATH_TO_TRANSFORMERS)
__lowerCamelCase : Optional[Any] = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
__lowerCamelCase : Union[str, Any] = re.compile(R'''\[(.+?)\]\((https://huggingface\.co/.+?)\)''')
__lowerCamelCase : Any = {
'''DecisionTransformerConfig''',
'''EncoderDecoderConfig''',
'''MusicgenConfig''',
'''RagConfig''',
'''SpeechEncoderDecoderConfig''',
'''TimmBackboneConfig''',
'''VisionEncoderDecoderConfig''',
'''VisionTextDualEncoderConfig''',
'''LlamaConfig''',
}
def __UpperCAmelCase ( __magic_name__ )-> List[Any]:
"""simple docstring"""
snake_case_ : Tuple = None
# source code of `config_class`
snake_case_ : List[Any] = inspect.getsource(__magic_name__ )
snake_case_ : List[str] = _re_checkpoint.findall(__magic_name__ )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith("/" ):
snake_case_ : Optional[Any] = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
snake_case_ : str = F'''https://huggingface.co/{ckpt_name}'''
if ckpt_link == ckpt_link_from_name:
snake_case_ : Dict = ckpt_name
break
return checkpoint
def __UpperCAmelCase ( )-> Dict:
"""simple docstring"""
snake_case_ : Optional[int] = []
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
snake_case_ : str = get_checkpoint_from_config_class(__magic_name__ )
snake_case_ : Union[str, Any] = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(__magic_name__ )
if len(__magic_name__ ) > 0:
snake_case_ : Tuple = "\n".join(sorted(__magic_name__ ) )
raise ValueError(F'''The following configurations don\'t contain any valid checkpoint:\n{message}''' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 656 |
'''simple docstring'''
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ (a_ , unittest.TestCase ):
"""simple docstring"""
a__ = MgpstrTokenizer
a__ = False
a__ = {}
a__ = False
def _A ( self :List[str] ) -> List[str]:
'''simple docstring'''
super().setUp()
# fmt: off
snake_case_ : Dict = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
# fmt: on
snake_case_ : List[str] = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
snake_case_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowerCAmelCase__ ) + "\n" )
def _A ( self :Optional[Any] , **lowerCAmelCase__ :Optional[Any] ) -> Dict:
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def _A ( self :Dict , lowerCAmelCase__ :Any ) -> str:
'''simple docstring'''
snake_case_ : Dict = "tester"
snake_case_ : Tuple = "tester"
return input_text, output_text
@unittest.skip("MGP-STR always lower cases letters." )
def _A ( self :Dict ) -> str:
'''simple docstring'''
pass
def _A ( self :Tuple ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : List[str] = self.get_tokenizers(do_lower_case=lowerCAmelCase__ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
snake_case_ : Tuple = "[SPECIAL_TOKEN]"
tokenizer.add_special_tokens({"cls_token": special_token} )
snake_case_ : str = tokenizer.encode([special_token] , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(len(lowerCAmelCase__ ) , 1 )
snake_case_ : Tuple = tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
self.assertTrue(special_token not in decoded )
def _A ( self :int ) -> List[str]:
'''simple docstring'''
snake_case_ : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
snake_case_, snake_case_ : str = self.get_input_output_texts(lowerCAmelCase__ )
snake_case_ : Union[str, Any] = tokenizer.tokenize(lowerCAmelCase__ )
snake_case_ : List[Any] = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ )
snake_case_ : Dict = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : List[str] = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ )
self.assertNotEqual(len(lowerCAmelCase__ ) , 0 )
snake_case_ : List[str] = tokenizer.decode(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(text_a.replace(" " , "" ) , lowerCAmelCase__ )
@unittest.skip("MGP-STR tokenizer only handles one sequence." )
def _A ( self :Union[str, Any] ) -> Any:
'''simple docstring'''
pass
@unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" )
def _A ( self :int ) -> Dict:
'''simple docstring'''
pass
| 656 | 1 |
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
__lowerCamelCase : str = logging.get_logger(__name__)
__lowerCamelCase : List[str] = {'''vocab_file''': '''spiece.model'''}
__lowerCamelCase : str = {
'''vocab_file''': {
'''TsinghuaAI/CPM-Generate''': '''https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model''',
}
}
class A_ (a_ ):
"""simple docstring"""
def __init__( self :List[Any] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Tuple=False , lowerCAmelCase__ :str=True , lowerCAmelCase__ :Optional[Any]=False , lowerCAmelCase__ :Union[str, Any]="<s>" , lowerCAmelCase__ :List[str]="</s>" , lowerCAmelCase__ :List[str]="<unk>" , lowerCAmelCase__ :Union[str, Any]="<sep>" , lowerCAmelCase__ :List[str]="<pad>" , lowerCAmelCase__ :str="<cls>" , lowerCAmelCase__ :Dict="<mask>" , lowerCAmelCase__ :Optional[Any]=["<eop>", "<eod>"] , lowerCAmelCase__ :Optional[Dict[str, Any]] = None , **lowerCAmelCase__ :Union[str, Any] , ) -> None:
'''simple docstring'''
snake_case_ : Optional[int] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token
snake_case_ : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=lowerCAmelCase__ , remove_space=lowerCAmelCase__ , keep_accents=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , additional_special_tokens=lowerCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase__ , )
snake_case_ : Union[str, Any] = 3
snake_case_ : str = do_lower_case
snake_case_ : Tuple = remove_space
snake_case_ : Tuple = keep_accents
snake_case_ : Tuple = vocab_file
snake_case_ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCAmelCase__ )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
"You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
"See https://pypi.org/project/jieba/ for installation." )
snake_case_ : Any = jieba
snake_case_ : Dict = str.maketrans(" \n" , "\u2582\u2583" )
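        # maps " " -> "\u2582" and "\n" -> "\u2583" before SentencePiece sees the
        # text; _decode below drops SentencePiece spaces and maps the sentinels back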
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def _A ( self :List[str] ) -> str:
'''simple docstring'''
return len(self.sp_model )
def _A ( self :str ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[Any] = {self.convert_ids_to_tokens(lowerCAmelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self :Optional[Any] ) -> int:
'''simple docstring'''
snake_case_ : Optional[int] = self.__dict__.copy()
snake_case_ : Optional[Any] = None
return state
def __setstate__( self :int , lowerCAmelCase__ :Any ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Optional[int] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
snake_case_ : Any = {}
snake_case_ : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _A ( self :Union[str, Any] , lowerCAmelCase__ :str ) -> Dict:
'''simple docstring'''
if self.remove_space:
snake_case_ : Dict = " ".join(inputs.strip().split() )
else:
snake_case_ : Union[str, Any] = inputs
snake_case_ : List[Any] = outputs.replace("``" , "\"" ).replace("''" , "\"" )
if not self.keep_accents:
snake_case_ : int = unicodedata.normalize("NFKD" , lowerCAmelCase__ )
snake_case_ : Optional[Any] = "".join([c for c in outputs if not unicodedata.combining(lowerCAmelCase__ )] )
if self.do_lower_case:
snake_case_ : Optional[int] = outputs.lower()
return outputs
def _A ( self :Union[str, Any] , lowerCAmelCase__ :str ) -> List[str]:
'''simple docstring'''
snake_case_ : str = self.preprocess_text(lowerCAmelCase__ )
snake_case_ : Union[str, Any] = self.sp_model.encode(lowerCAmelCase__ , out_type=lowerCAmelCase__ )
snake_case_ : Optional[Any] = []
for piece in pieces:
if len(lowerCAmelCase__ ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
snake_case_ : Optional[Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(lowerCAmelCase__ , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
snake_case_ : int = cur_pieces[1:]
else:
snake_case_ : str = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(lowerCAmelCase__ )
else:
new_pieces.append(lowerCAmelCase__ )
return new_pieces
def _A ( self :Any , lowerCAmelCase__ :Optional[int] ) -> List[Any]:
'''simple docstring'''
return self.sp_model.PieceToId(lowerCAmelCase__ )
def _A ( self :int , lowerCAmelCase__ :Union[str, Any] ) -> List[Any]:
'''simple docstring'''
return self.sp_model.IdToPiece(lowerCAmelCase__ )
def _A ( self :List[Any] , lowerCAmelCase__ :Any ) -> Tuple:
'''simple docstring'''
snake_case_ : Tuple = "".join(lowerCAmelCase__ ).replace(lowerCAmelCase__ , " " ).strip()
return out_string
def _A ( self :Optional[Any] , lowerCAmelCase__ :List[int] , lowerCAmelCase__ :Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
snake_case_ : Optional[Any] = [self.sep_token_id]
snake_case_ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _A ( self :Dict , lowerCAmelCase__ :List[int] , lowerCAmelCase__ :Optional[List[int]] = None , lowerCAmelCase__ :bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__ )
if token_ids_a is not None:
return ([0] * len(lowerCAmelCase__ )) + [1] + ([0] * len(lowerCAmelCase__ )) + [1, 1]
return ([0] * len(lowerCAmelCase__ )) + [1, 1]
def _A ( self :List[str] , lowerCAmelCase__ :List[int] , lowerCAmelCase__ :Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
snake_case_ : str = [self.sep_token_id]
snake_case_ : int = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _A ( self :Dict , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case_ : Optional[Any] = os.path.join(
lowerCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCAmelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase__ , "wb" ) as fi:
snake_case_ : List[str] = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase__ )
return (out_vocab_file,)
def _A ( self :List[str] , *lowerCAmelCase__ :List[str] , **lowerCAmelCase__ :int ) -> List[Any]:
'''simple docstring'''
snake_case_ : Tuple = super()._decode(*lowerCAmelCase__ , **lowerCAmelCase__ )
snake_case_ : List[str] = text.replace(" " , "" ).replace("\u2582" , " " ).replace("\u2583" , "\n" )
return text
| 656 |
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> float:
"""simple docstring"""
return math.sqrt(sum(pow(a - b ,2 ) for a, b in zip(__magic_name__ ,__magic_name__ ) ) )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> list[list[list[float] | float]]:
"""simple docstring"""
if dataset.ndim != value_array.ndim:
snake_case_ : int = (
"Wrong input data's dimensions... "
F'''dataset : {dataset.ndim}, value_array : {value_array.ndim}'''
)
raise ValueError(__magic_name__ )
try:
if dataset.shape[1] != value_array.shape[1]:
snake_case_ : Dict = (
"Wrong input data's shape... "
F'''dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}'''
)
raise ValueError(__magic_name__ )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError("Wrong shape" )
if dataset.dtype != value_array.dtype:
snake_case_ : Dict = (
"Input data have different datatype... "
F'''dataset : {dataset.dtype}, value_array : {value_array.dtype}'''
)
raise TypeError(__magic_name__ )
snake_case_ : Optional[int] = []
for value in value_array:
snake_case_ : List[str] = euclidean(__magic_name__ ,dataset[0] )
snake_case_ : int = dataset[0].tolist()
for dataset_value in dataset[1:]:
snake_case_ : Optional[Any] = euclidean(__magic_name__ ,__magic_name__ )
if dist > temp_dist:
snake_case_ : Tuple = temp_dist
snake_case_ : Optional[int] = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> float:
"""simple docstring"""
return np.dot(__magic_name__ ,__magic_name__ ) / (norm(__magic_name__ ) * norm(__magic_name__ ))
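# Quick usage sketch with toy data (descriptive names for the functions above;
# values are illustrative):
#   dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
#   value_array = np.array([[0.9, 1.1]])
#   similarity_search(dataset, value_array)  # -> [[[1.0, 1.0], 0.1414...]]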
if __name__ == "__main__":
import doctest
doctest.testmod()
| 656 | 1 |
'''simple docstring'''
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
__lowerCamelCase : Optional[Any] = False
class A_ (unittest.TestCase ):
"""simple docstring"""
def _A ( self :Any , lowerCAmelCase__ :Optional[int]=32 ) -> Optional[Any]:
'''simple docstring'''
set_seed(0 )
snake_case_ : Dict = UNetaDModel(sample_size=lowerCAmelCase__ , in_channels=3 , out_channels=3 )
snake_case_ : List[Any] = torch.optim.SGD(model.parameters() , lr=0.0_0_0_1 )
return model, optimizer
@slow
def _A ( self :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : str = "cpu" # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
snake_case_ : Optional[Any] = DDPMScheduler(
num_train_timesteps=1_000 , beta_start=0.0_0_0_1 , beta_end=0.0_2 , beta_schedule="linear" , clip_sample=lowerCAmelCase__ , )
snake_case_ : List[str] = DDIMScheduler(
num_train_timesteps=1_000 , beta_start=0.0_0_0_1 , beta_end=0.0_2 , beta_schedule="linear" , clip_sample=lowerCAmelCase__ , )
assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
# shared batches for DDPM and DDIM
set_seed(0 )
snake_case_ : Any = [torch.randn((4, 3, 32, 32) ).clip(-1 , 1 ).to(lowerCAmelCase__ ) for _ in range(4 )]
snake_case_ : Optional[Any] = [torch.randn((4, 3, 32, 32) ).to(lowerCAmelCase__ ) for _ in range(4 )]
snake_case_ : Any = [torch.randint(0 , 1_000 , (4,) ).long().to(lowerCAmelCase__ ) for _ in range(4 )]
# train with a DDPM scheduler
snake_case_, snake_case_ : int = self.get_model_optimizer(resolution=32 )
model.train().to(lowerCAmelCase__ )
for i in range(4 ):
optimizer.zero_grad()
snake_case_ : Union[str, Any] = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
snake_case_ : Dict = model(lowerCAmelCase__ , timesteps[i] ).sample
snake_case_ : List[Any] = torch.nn.functional.mse_loss(lowerCAmelCase__ , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
# recreate the model and optimizer, and retry with DDIM
snake_case_, snake_case_ : str = self.get_model_optimizer(resolution=32 )
model.train().to(lowerCAmelCase__ )
for i in range(4 ):
optimizer.zero_grad()
snake_case_ : Union[str, Any] = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
snake_case_ : str = model(lowerCAmelCase__ , timesteps[i] ).sample
snake_case_ : Dict = torch.nn.functional.mse_loss(lowerCAmelCase__ , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
self.assertTrue(torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-5 ) )
self.assertTrue(torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-5 ) )
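# Hedged sketch of the invariant the test above exploits: with identical beta
# schedules, DDPM and DDIM share the same forward-noising operator, so
# add_noise() yields identical noisy samples (shapes below are illustrative).
def _add_noise_equivalence_sketch():
    ddpm = DDPMScheduler(num_train_timesteps=1_000, beta_schedule="linear")
    ddim = DDIMScheduler(num_train_timesteps=1_000, beta_schedule="linear")
    sample, noise = torch.randn(1, 3, 32, 32), torch.randn(1, 3, 32, 32)
    timesteps = torch.tensor([10])
    assert torch.allclose(
        ddpm.add_noise(sample, noise, timesteps), ddim.add_noise(sample, noise, timesteps)
    )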
| 656 |
'''simple docstring'''
import fire
from utils import calculate_rouge, save_json
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__=None ,**__magic_name__ )-> Optional[Any]:
"""simple docstring"""
snake_case_ : int = [x.strip() for x in open(__magic_name__ ).readlines()]
snake_case_ : Optional[int] = [x.strip() for x in open(__magic_name__ ).readlines()][: len(__magic_name__ )]
snake_case_ : List[Any] = calculate_rouge(__magic_name__ ,__magic_name__ ,**__magic_name__ )
if save_path is not None:
save_json(__magic_name__ ,__magic_name__ ,indent=__magic_name__ )
return metrics # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
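    # Hypothetical CLI invocation via fire (file names are placeholders):
    #   python run_rouge.py predictions.txt references.txt --save_path rouge.json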
| 656 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowerCamelCase : List[str] = logging.get_logger(__name__)
__lowerCamelCase : Tuple = {
'''facebook/convnextv2-tiny-1k-224''': '''https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json''',
}
class A_ (a_ , a_ ):
"""simple docstring"""
a__ = '''convnextv2'''
def __init__( self :str , lowerCAmelCase__ :str=3 , lowerCAmelCase__ :Optional[Any]=4 , lowerCAmelCase__ :List[Any]=4 , lowerCAmelCase__ :Optional[int]=None , lowerCAmelCase__ :Dict=None , lowerCAmelCase__ :List[Any]="gelu" , lowerCAmelCase__ :Optional[Any]=0.0_2 , lowerCAmelCase__ :List[Any]=1E-1_2 , lowerCAmelCase__ :int=0.0 , lowerCAmelCase__ :Tuple=224 , lowerCAmelCase__ :Optional[int]=None , lowerCAmelCase__ :str=None , **lowerCAmelCase__ :Optional[Any] , ) -> List[Any]:
'''simple docstring'''
super().__init__(**lowerCAmelCase__ )
snake_case_ : Optional[Any] = num_channels
snake_case_ : Any = patch_size
snake_case_ : Dict = num_stages
snake_case_ : Tuple = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
snake_case_ : List[Any] = [3, 3, 9, 3] if depths is None else depths
snake_case_ : Optional[Any] = hidden_act
snake_case_ : Optional[Any] = initializer_range
snake_case_ : Dict = layer_norm_eps
snake_case_ : Union[str, Any] = drop_path_rate
snake_case_ : Union[str, Any] = image_size
snake_case_ : List[str] = ["stem"] + [F'''stage{idx}''' for idx in range(1 , len(self.depths ) + 1 )]
snake_case_, snake_case_ : int = get_aligned_output_features_output_indices(
out_features=lowerCAmelCase__ , out_indices=lowerCAmelCase__ , stage_names=self.stage_names )
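# Hedged usage sketch: upstream this class is transformers.ConvNextV2Config (the
# local name is corpus-obfuscated). Defaults yield four stages with widths
# [96, 192, 384, 768] and depths [3, 3, 9, 3]; out_features picks backbone stages.
def _convnextv2_config_sketch():
    from transformers import ConvNextV2Config  # needs a recent transformers release

    cfg = ConvNextV2Config(out_features=["stage4"])
    assert cfg.hidden_sizes == [96, 192, 384, 768]
    return cfg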
| 656 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
__lowerCamelCase : Optional[Any] = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', f'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', f'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', f'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', f'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.weight''', f'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', f'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', f'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', f'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.weight''', f'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', f'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', f'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', f'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', f'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', f'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.bias''', f'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', f'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', f'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', f'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.bias''', f'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', f'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''),
('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''),
('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''),
('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''),
('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''),
('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''),
('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''),
('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''),
('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''),
('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''),
]
)
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> int:
"""simple docstring"""
snake_case_ : Optional[Any] = state_dict.pop(__magic_name__ )
snake_case_ : Any = val
def __UpperCAmelCase ( __magic_name__ )-> Optional[Any]:
"""simple docstring"""
snake_case_ : Any = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
snake_case_ : Optional[Any] = key.replace("backbone.0.body" ,"backbone.conv_encoder.model" )
snake_case_ : int = value
else:
snake_case_ : int = value
return new_state_dict
def __UpperCAmelCase ( __magic_name__ ,__magic_name__=False )-> Optional[int]:
"""simple docstring"""
snake_case_ : str = ""
if is_panoptic:
snake_case_ : Dict = "conditional_detr."
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
snake_case_ : Any = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
snake_case_ : Optional[int] = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
snake_case_ : Tuple = in_proj_weight[:256, :]
snake_case_ : List[Any] = in_proj_bias[:256]
snake_case_ : Optional[Any] = in_proj_weight[256:512, :]
snake_case_ : Optional[int] = in_proj_bias[256:512]
snake_case_ : Optional[int] = in_proj_weight[-256:, :]
snake_case_ : str = in_proj_bias[-256:]
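# Hedged illustration of the slicing above: PyTorch's fused attention projection
# stores a weight of shape (3 * d_model, d_model); rows [0:d), [d:2d) and [2d:3d)
# are the query, key and value projections (d_model = 256 for this DETR family).
def _qkv_split_sketch(d=256):
    in_proj_weight = torch.randn(3 * d, d)
    q_w, k_w, v_w = in_proj_weight[:d], in_proj_weight[d : 2 * d], in_proj_weight[-d:]
    assert q_w.shape == k_w.shape == v_w.shape == torch.Size([d, d])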
def __UpperCAmelCase ( )-> Optional[Any]:
"""simple docstring"""
snake_case_ : Optional[int] = "http://images.cocodataset.org/val2017/000000039769.jpg"
snake_case_ : Optional[Any] = Image.open(requests.get(__magic_name__ ,stream=__magic_name__ ).raw )
return im
@torch.no_grad()
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> List[str]:
"""simple docstring"""
snake_case_ : Optional[Any] = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
snake_case_ : Optional[Any] = "resnet101"
if "dc5" in model_name:
snake_case_ : List[str] = True
snake_case_ : Tuple = "panoptic" in model_name
if is_panoptic:
snake_case_ : List[Any] = 250
else:
snake_case_ : Optional[Any] = 91
snake_case_ : Optional[int] = "huggingface/label-files"
snake_case_ : Dict = "coco-detection-id2label.json"
snake_case_ : List[Any] = json.load(open(hf_hub_download(__magic_name__ ,__magic_name__ ,repo_type="dataset" ) ,"r" ) )
snake_case_ : Optional[int] = {int(__magic_name__ ): v for k, v in idalabel.items()}
snake_case_ : int = idalabel
snake_case_ : Dict = {v: k for k, v in idalabel.items()}
# load image processor
snake_case_ : Optional[int] = "coco_panoptic" if is_panoptic else "coco_detection"
snake_case_ : str = ConditionalDetrImageProcessor(format=__magic_name__ )
# prepare image
snake_case_ : str = prepare_img()
snake_case_ : int = image_processor(images=__magic_name__ ,return_tensors="pt" )
snake_case_ : Union[str, Any] = encoding["pixel_values"]
logger.info(F'''Converting model {model_name}...''' )
# load original model from torch hub
snake_case_ : Union[str, Any] = torch.hub.load("DeppMeng/ConditionalDETR" ,__magic_name__ ,pretrained=__magic_name__ ).eval()
snake_case_ : Any = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
snake_case_ : Any = "conditional_detr." + src
rename_key(__magic_name__ ,__magic_name__ ,__magic_name__ )
snake_case_ : Tuple = rename_backbone_keys(__magic_name__ )
# query, key and value matrices need special treatment
read_in_q_k_v(__magic_name__ ,is_panoptic=__magic_name__ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
snake_case_ : int = "conditional_detr.model." if is_panoptic else "model."
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("conditional_detr" )
and not key.startswith("class_labels_classifier" )
and not key.startswith("bbox_predictor" )
):
snake_case_ : Any = state_dict.pop(__magic_name__ )
snake_case_ : Optional[int] = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
snake_case_ : Tuple = state_dict.pop(__magic_name__ )
snake_case_ : Any = val
elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
continue
else:
snake_case_ : Union[str, Any] = state_dict.pop(__magic_name__ )
snake_case_ : List[Any] = val
else:
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
snake_case_ : Any = state_dict.pop(__magic_name__ )
snake_case_ : List[Any] = val
# finally, create HuggingFace model and load state dict
snake_case_ : Optional[int] = ConditionalDetrForSegmentation(__magic_name__ ) if is_panoptic else ConditionalDetrForObjectDetection(__magic_name__ )
model.load_state_dict(__magic_name__ )
model.eval()
model.push_to_hub(repo_id=__magic_name__ ,organization="DepuMeng" ,commit_message="Add model" )
# verify our conversion
snake_case_ : Dict = conditional_detr(__magic_name__ )
snake_case_ : Union[str, Any] = model(__magic_name__ )
assert torch.allclose(outputs.logits ,original_outputs["pred_logits"] ,atol=1E-4 )
assert torch.allclose(outputs.pred_boxes ,original_outputs["pred_boxes"] ,atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks ,original_outputs["pred_masks"] ,atol=1E-4 )
# Save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ )
model.save_pretrained(__magic_name__ )
image_processor.save_pretrained(__magic_name__ )
if __name__ == "__main__":
__lowerCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''conditional_detr_resnet50''',
type=str,
help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
__lowerCamelCase : int = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
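    # Hypothetical invocation (script name and paths are placeholders):
    #   python convert_conditional_detr.py --model_name conditional_detr_resnet50 \
    #       --pytorch_dump_folder_path ./dump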
| 656 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> float:
"""simple docstring"""
return math.sqrt(sum(pow(a - b ,2 ) for a, b in zip(__magic_name__ ,__magic_name__ ) ) )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> list[list[list[float] | float]]:
"""simple docstring"""
if dataset.ndim != value_array.ndim:
snake_case_ : int = (
"Wrong input data's dimensions... "
F'''dataset : {dataset.ndim}, value_array : {value_array.ndim}'''
)
raise ValueError(__magic_name__ )
try:
if dataset.shape[1] != value_array.shape[1]:
snake_case_ : Dict = (
"Wrong input data's shape... "
F'''dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}'''
)
raise ValueError(__magic_name__ )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError("Wrong shape" )
if dataset.dtype != value_array.dtype:
snake_case_ : Dict = (
"Input data have different datatype... "
F'''dataset : {dataset.dtype}, value_array : {value_array.dtype}'''
)
raise TypeError(__magic_name__ )
snake_case_ : Optional[int] = []
for value in value_array:
snake_case_ : List[str] = euclidean(__magic_name__ ,dataset[0] )
snake_case_ : int = dataset[0].tolist()
for dataset_value in dataset[1:]:
snake_case_ : Optional[Any] = euclidean(__magic_name__ ,__magic_name__ )
if dist > temp_dist:
snake_case_ : Tuple = temp_dist
snake_case_ : Optional[int] = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> float:
"""simple docstring"""
return np.dot(__magic_name__ ,__magic_name__ ) / (norm(__magic_name__ ) * norm(__magic_name__ ))
if __name__ == "__main__":
import doctest
doctest.testmod()
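    # Hedged, vectorized equivalent of the linear scan above (upstream the
    # function is named similarity_search): pick the dataset row with the
    # smallest euclidean distance to the query.
    _dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
    _query = np.array([0.2, 0.1])
    _nearest = _dataset[np.argmin(norm(_dataset - _query, axis=1))]
    assert _nearest.tolist() == [0.0, 0.0]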
| 656 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class A_ (unittest.TestCase ):
"""simple docstring"""
def _A ( self :Any ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _A ( self :List[Any] ) -> List[str]:
'''simple docstring'''
snake_case_ : Any = 1
snake_case_ : Dict = 3
snake_case_ : Union[str, Any] = (32, 32)
snake_case_ : Optional[int] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowerCAmelCase__ )
return image
@property
def _A ( self :Optional[int] ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : List[str] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
return model
@property
def _A ( self :Dict ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : Optional[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def _A ( self :Dict ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : str = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_006 , )
return RobertaSeriesModelWithTransformation(lowerCAmelCase__ )
@property
def _A ( self :Any ) -> str:
'''simple docstring'''
def extract(*lowerCAmelCase__ :Any , **lowerCAmelCase__ :List[str] ):
class A_ :
"""simple docstring"""
def __init__( self :Optional[int] ) -> List[str]:
'''simple docstring'''
snake_case_ : str = torch.ones([0] )
def _A ( self :int , lowerCAmelCase__ :List[Any] ) -> Tuple:
'''simple docstring'''
self.pixel_values.to(lowerCAmelCase__ )
return self
return Out()
return extract
def _A ( self :int ) -> Dict:
'''simple docstring'''
snake_case_ : str = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case_ : str = self.dummy_cond_unet
snake_case_ : Optional[int] = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
snake_case_ : Dict = self.dummy_vae
snake_case_ : Dict = self.dummy_text_encoder
snake_case_ : Optional[int] = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
snake_case_ : str = 77
snake_case_ : Any = self.dummy_image.to(lowerCAmelCase__ )
snake_case_ : Tuple = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
snake_case_ : Optional[Any] = AltDiffusionImgaImgPipeline(
unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , feature_extractor=self.dummy_extractor , )
snake_case_ : Union[str, Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=lowerCAmelCase__ )
snake_case_ : Optional[Any] = alt_pipe.to(lowerCAmelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
snake_case_ : Dict = "A painting of a squirrel eating a burger"
snake_case_ : List[str] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
snake_case_ : Dict = alt_pipe(
[prompt] , generator=lowerCAmelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=lowerCAmelCase__ , )
snake_case_ : Any = output.images
snake_case_ : List[str] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
snake_case_ : Optional[Any] = alt_pipe(
[prompt] , generator=lowerCAmelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=lowerCAmelCase__ , return_dict=lowerCAmelCase__ , )[0]
snake_case_ : Tuple = image[0, -3:, -3:, -1]
snake_case_ : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
snake_case_ : int = np.array([0.4_4_2_7, 0.3_7_3_1, 0.4_2_4_9, 0.4_9_4_1, 0.4_5_4_6, 0.4_1_4_8, 0.4_1_9_3, 0.4_6_6_6, 0.4_4_9_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def _A ( self :int ) -> List[str]:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.dummy_cond_unet
snake_case_ : Union[str, Any] = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
snake_case_ : int = self.dummy_vae
snake_case_ : List[Any] = self.dummy_text_encoder
snake_case_ : int = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
snake_case_ : int = 77
snake_case_ : Dict = self.dummy_image.to(lowerCAmelCase__ )
# put models in fp16
snake_case_ : Optional[Any] = unet.half()
snake_case_ : Tuple = vae.half()
snake_case_ : List[str] = bert.half()
# make sure here that pndm scheduler skips prk
snake_case_ : Optional[int] = AltDiffusionImgaImgPipeline(
unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , feature_extractor=self.dummy_extractor , )
snake_case_ : List[str] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=lowerCAmelCase__ )
snake_case_ : Optional[Any] = alt_pipe.to(lowerCAmelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
snake_case_ : List[Any] = "A painting of a squirrel eating a burger"
snake_case_ : str = torch.manual_seed(0 )
snake_case_ : Any = alt_pipe(
[prompt] , generator=lowerCAmelCase__ , num_inference_steps=2 , output_type="np" , image=lowerCAmelCase__ , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def _A ( self :Optional[int] ) -> Any:
'''simple docstring'''
snake_case_ : Union[str, Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
# resize to resolution that is divisible by 8 but not 16 or 32
snake_case_ : str = init_image.resize((760, 504) )
snake_case_ : Optional[Any] = "BAAI/AltDiffusion"
snake_case_ : int = AltDiffusionImgaImgPipeline.from_pretrained(
lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
snake_case_ : Tuple = "A fantasy landscape, trending on artstation"
snake_case_ : int = torch.manual_seed(0 )
snake_case_ : List[str] = pipe(
prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , strength=0.7_5 , guidance_scale=7.5 , generator=lowerCAmelCase__ , output_type="np" , )
snake_case_ : str = output.images[0]
snake_case_ : List[Any] = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
snake_case_ : Tuple = np.array([0.9_3_5_8, 0.9_3_9_7, 0.9_5_9_9, 0.9_9_0_1, 1.0_0_0_0, 1.0_0_0_0, 0.9_8_8_2, 1.0_0_0_0, 1.0_0_0_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class A_ (unittest.TestCase ):
"""simple docstring"""
def _A ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _A ( self :str ) -> Any:
'''simple docstring'''
snake_case_ : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
snake_case_ : List[Any] = init_image.resize((768, 512) )
snake_case_ : Tuple = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" )
snake_case_ : Any = "BAAI/AltDiffusion"
snake_case_ : List[str] = AltDiffusionImgaImgPipeline.from_pretrained(
lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
snake_case_ : Tuple = "A fantasy landscape, trending on artstation"
snake_case_ : Tuple = torch.manual_seed(0 )
snake_case_ : List[Any] = pipe(
prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , strength=0.7_5 , guidance_scale=7.5 , generator=lowerCAmelCase__ , output_type="np" , )
snake_case_ : Optional[int] = output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1E-2
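# Hedged sketch of the img2img `strength` semantics exercised above: strength
# decides how much of the denoising trajectory is re-run on the init image
# (mirrors diffusers' get_timesteps logic; the constants are illustrative).
def _strength_to_timesteps_sketch(num_inference_steps=50, strength=0.75):
    init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
    t_start = max(num_inference_steps - init_timestep, 0)
    return t_start  # 13 here: the pipeline runs only the last 37 of 50 steps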
| 656 | 1 |
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def __UpperCAmelCase ( __magic_name__ )-> str:
"""simple docstring"""
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class A_ (nn.Module ):
"""simple docstring"""
def __init__( self :Optional[int] , lowerCAmelCase__ :nn.Module , lowerCAmelCase__ :int ) -> List[Any]:
'''simple docstring'''
super().__init__()
snake_case_ : int = module
snake_case_ : Dict = nn.Sequential(
nn.Linear(module.in_features , lowerCAmelCase__ , bias=lowerCAmelCase__ ) , nn.Linear(lowerCAmelCase__ , module.out_features , bias=lowerCAmelCase__ ) , )
snake_case_ : List[str] = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=lowerCAmelCase__ )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def _A ( self :Optional[int] , lowerCAmelCase__ :str , *lowerCAmelCase__ :Optional[Any] , **lowerCAmelCase__ :Tuple ) -> List[str]:
'''simple docstring'''
return self.module(lowerCAmelCase__ , *lowerCAmelCase__ , **lowerCAmelCase__ ) + self.adapter(lowerCAmelCase__ )
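# Hedged numeric check of the LoRA adapter above: with the second projection
# zero-initialised, the adapter branch contributes nothing at step 0, so the
# wrapped module initially reproduces the frozen base module exactly.
def _lora_zero_init_sketch():
    import torch
    import torch.nn as nn

    base = nn.Linear(8, 8)
    adapter = nn.Sequential(nn.Linear(8, 4, bias=False), nn.Linear(4, 8, bias=False))
    nn.init.zeros_(adapter[1].weight)
    x = torch.randn(2, 8)
    assert torch.allclose(base(x) + adapter(x), base(x))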
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class A_ (unittest.TestCase ):
"""simple docstring"""
a__ = '''bigscience/bloom-1b7'''
# Constant values
a__ = 2.109659552692574
a__ = '''Hello my name is'''
a__ = set()
EXPECTED_OUTPUTS.add('''Hello my name is John and I am a professional photographer. I''' )
EXPECTED_OUTPUTS.add('''Hello my name is John.\nI am a friend of your father.\n''' )
EXPECTED_OUTPUTS.add('''Hello my name is John Doe, I am a student at the University''' )
a__ = 10
def _A ( self :List[Any] ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = AutoTokenizer.from_pretrained(self.model_name )
class A_ (a_ ):
"""simple docstring"""
def _A ( self :Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
# Models and tokenizer
snake_case_ : List[Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map="auto" )
snake_case_ : str = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase__ , device_map="auto" )
def _A ( self :Any ) -> Any:
'''simple docstring'''
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def _A ( self :Dict ) -> Dict:
'''simple docstring'''
snake_case_ : List[Any] = self.model_abit.config
self.assertTrue(hasattr(lowerCAmelCase__ , "quantization_config" ) )
snake_case_ : List[Any] = config.to_dict()
snake_case_ : Optional[int] = config.to_diff_dict()
snake_case_ : int = config.to_json_string()
def _A ( self :List[str] ) -> int:
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
snake_case_ : Union[str, Any] = self.model_fpaa.get_memory_footprint()
snake_case_ : Any = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
snake_case_ : str = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def _A ( self :Union[str, Any] ) -> List[Any]:
'''simple docstring'''
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(lowerCAmelCase__ , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def _A ( self :Dict ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Dict = self.tokenizer(self.input_text , return_tensors="pt" )
snake_case_ : Union[str, Any] = self.model_abit.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCAmelCase__ ) , self.EXPECTED_OUTPUTS )
def _A ( self :Union[str, Any] ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[int] = BitsAndBytesConfig()
snake_case_ : int = True
snake_case_ : Tuple = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=lowerCAmelCase__ , device_map="auto" )
snake_case_ : List[str] = self.tokenizer(self.input_text , return_tensors="pt" )
snake_case_ : Any = model_abit_from_config.generate(
input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCAmelCase__ ) , self.EXPECTED_OUTPUTS )
def _A ( self :Optional[int] ) -> Optional[Any]:
'''simple docstring'''
with self.assertRaises(lowerCAmelCase__ ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(lowerCAmelCase__ )
def _A ( self :Optional[int] ) -> str:
'''simple docstring'''
snake_case_ : Tuple = BitsAndBytesConfig()
with self.assertRaises(lowerCAmelCase__ ):
snake_case_ : Union[str, Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=lowerCAmelCase__ , load_in_abit=lowerCAmelCase__ , device_map="auto" , bnb_abit_quant_type="nf4" , )
def _A ( self :Any ) -> int:
'''simple docstring'''
with self.assertRaises(lowerCAmelCase__ ):
# Tries with `str`
self.model_abit.to("cpu" )
with self.assertRaises(lowerCAmelCase__ ):
            # Tries with a `dtype`
self.model_abit.to(torch.floataa )
with self.assertRaises(lowerCAmelCase__ ):
# Tries with a `device`
self.model_abit.to(torch.device("cuda:0" ) )
with self.assertRaises(lowerCAmelCase__ ):
            # Tries with a cast to float
self.model_abit.float()
with self.assertRaises(lowerCAmelCase__ ):
            # Tries with a cast to half
self.model_abit.half()
# Test if we did not break anything
snake_case_ : Any = self.tokenizer(self.input_text , return_tensors="pt" )
snake_case_ : Optional[Any] = self.model_fpaa.to(torch.floataa )
snake_case_ : Optional[int] = self.model_fpaa.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
snake_case_ : Union[str, Any] = self.model_fpaa.to("cpu" )
# Check this does not throw an error
snake_case_ : Optional[Any] = self.model_fpaa.half()
# Check this does not throw an error
snake_case_ : int = self.model_fpaa.float()
def _A ( self :Optional[Any] ) -> Dict:
'''simple docstring'''
snake_case_ : Any = AutoModelForSeqaSeqLM.from_pretrained("t5-small" , load_in_abit=lowerCAmelCase__ , device_map="auto" )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
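# Hedged sketch of the 4-bit loading path exercised above; the model id and GPU
# requirement match the tests, the helper name is illustrative.
def _load_4bit_sketch():
    bnb_cfg = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4")
    return AutoModelForCausalLM.from_pretrained(
        "bigscience/bloom-1b7", quantization_config=bnb_cfg, device_map="auto"
    )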
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class A_ (unittest.TestCase ):
"""simple docstring"""
@classmethod
def _A ( cls :List[Any] ) -> Dict:
'''simple docstring'''
snake_case_ : Any = "t5-small"
snake_case_ : Union[str, Any] = "google/flan-t5-small" # flan-t5 uses dense-act instead of dense-relu-dense
snake_case_ : int = AutoTokenizer.from_pretrained(cls.model_name )
snake_case_ : str = "Translate in German: Hello, my dog is cute"
def _A ( self :int ) -> Optional[Any]:
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
def _A ( self :Dict ) -> Any:
'''simple docstring'''
from transformers import TaForConditionalGeneration
snake_case_ : List[Any] = TaForConditionalGeneration._keep_in_fpaa_modules
snake_case_ : Optional[Any] = None
# test with `t5-small`
snake_case_ : Any = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase__ , device_map="auto" )
snake_case_ : Union[str, Any] = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
snake_case_ : int = model.generate(**lowerCAmelCase__ )
# test with `flan-t5-small`
snake_case_ : Tuple = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=lowerCAmelCase__ , device_map="auto" )
snake_case_ : List[str] = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
snake_case_ : Optional[Any] = model.generate(**lowerCAmelCase__ )
snake_case_ : List[str] = modules
def _A ( self :Tuple ) -> Any:
'''simple docstring'''
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
snake_case_ : Tuple = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase__ , device_map="auto" )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
snake_case_ : Any = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
snake_case_ : List[str] = model.generate(**lowerCAmelCase__ )
# test with `flan-t5-small`
snake_case_ : Tuple = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=lowerCAmelCase__ , device_map="auto" )
snake_case_ : Dict = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
snake_case_ : Optional[Any] = model.generate(**lowerCAmelCase__ )
class A_ (a_ ):
"""simple docstring"""
def _A ( self :List[str] ) -> List[str]:
'''simple docstring'''
super().setUp()
# model_name
snake_case_ : Union[str, Any] = "bigscience/bloom-560m"
snake_case_ : Optional[Any] = "t5-small"
# Different types of model
snake_case_ : Optional[int] = AutoModel.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase__ , device_map="auto" )
# Sequence classification model
snake_case_ : str = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=lowerCAmelCase__ , device_map="auto" )
# CausalLM model
snake_case_ : Union[str, Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase__ , device_map="auto" )
# Seq2seq model
snake_case_ : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=lowerCAmelCase__ , device_map="auto" )
def _A ( self :Dict ) -> Optional[int]:
'''simple docstring'''
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def _A ( self :Any ) -> Dict:
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class A_ (a_ ):
"""simple docstring"""
def _A ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
super().setUp()
def _A ( self :Optional[Any] ) -> List[Any]:
'''simple docstring'''
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def _A ( self :Any ) -> Dict:
'''simple docstring'''
snake_case_ : Optional[int] = pipeline(
"text-generation" , model=self.model_name , model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
snake_case_ : Dict = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]["generated_text"] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class A_ (a_ ):
"""simple docstring"""
def _A ( self :Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
def _A ( self :Optional[Any] ) -> List[Any]:
'''simple docstring'''
snake_case_ : Any = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=lowerCAmelCase__ , device_map="balanced" )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
snake_case_ : Any = self.tokenizer(self.input_text , return_tensors="pt" )
# Second real batch
snake_case_ : Tuple = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=lowerCAmelCase__ ) , self.EXPECTED_OUTPUTS )
class A_ (a_ ):
"""simple docstring"""
def _A ( self :List[Any] ) -> str:
'''simple docstring'''
snake_case_ : Dict = "facebook/opt-350m"
super().setUp()
def _A ( self :Any ) -> Tuple:
'''simple docstring'''
if version.parse(importlib.metadata.version("bitsandbytes" ) ) < version.parse("0.37.0" ):
return
# Step 1: freeze all parameters
snake_case_ : str = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase__ )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
snake_case_ : Optional[Any] = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
snake_case_ : Optional[Any] = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(lowerCAmelCase__ ) ):
snake_case_ : Union[str, Any] = LoRALayer(module.q_proj , rank=16 )
snake_case_ : Dict = LoRALayer(module.k_proj , rank=16 )
snake_case_ : Optional[Any] = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
snake_case_ : Optional[int] = self.tokenizer("Test batch " , return_tensors="pt" ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
snake_case_ : Optional[int] = model.forward(**lowerCAmelCase__ )
out.logits.norm().backward()
for module in model.modules():
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(lowerCAmelCase__ , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class A_ (a_ ):
"""simple docstring"""
a__ = '''gpt2-xl'''
a__ = 3.3191854854152187
| 656 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
__lowerCamelCase : List[str] = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''}
@is_pipeline_test
class A_ (unittest.TestCase ):
"""simple docstring"""
a__ = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
a__ = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
a__ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
a__ = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def _A ( self :Tuple , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Dict ) -> Any:
'''simple docstring'''
snake_case_ : Optional[Any] = ZeroShotClassificationPipeline(
            model=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , candidate_labels=["politics", "health"] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def _A ( self :List[str] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Any ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Tuple = classifier("Who are you voting for in 2020?" , candidate_labels="politics" )
self.assertEqual(lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ )]} )
# No kwarg
snake_case_ : List[Any] = classifier("Who are you voting for in 2020?" , ["politics"] )
self.assertEqual(lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ )]} )
snake_case_ : Dict = classifier("Who are you voting for in 2020?" , candidate_labels=["politics"] )
self.assertEqual(lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ )]} )
snake_case_ : int = classifier("Who are you voting for in 2020?" , candidate_labels="politics, public health" )
self.assertEqual(
lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ) , 1.0 )
snake_case_ : Optional[int] = classifier("Who are you voting for in 2020?" , candidate_labels=["politics", "public health"] )
self.assertEqual(
lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ) , 1.0 )
snake_case_ : str = classifier(
"Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template="This text is about {}" )
self.assertEqual(lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ )]} )
# https://github.com/huggingface/transformers/issues/13846
snake_case_ : Dict = classifier(["I am happy"] , ["positive", "negative"] )
self.assertEqual(
lowerCAmelCase__ , [
{"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )]}
for i in range(1 )
] , )
snake_case_ : Tuple = classifier(["I am happy", "I am sad"] , ["positive", "negative"] )
self.assertEqual(
lowerCAmelCase__ , [
{"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )]}
for i in range(2 )
] , )
with self.assertRaises(lowerCAmelCase__ ):
classifier("" , candidate_labels="politics" )
with self.assertRaises(lowerCAmelCase__ ):
classifier(lowerCAmelCase__ , candidate_labels="politics" )
with self.assertRaises(lowerCAmelCase__ ):
classifier("Who are you voting for in 2020?" , candidate_labels="" )
with self.assertRaises(lowerCAmelCase__ ):
classifier("Who are you voting for in 2020?" , candidate_labels=lowerCAmelCase__ )
with self.assertRaises(lowerCAmelCase__ ):
classifier(
"Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template="Not formatting template" , )
with self.assertRaises(lowerCAmelCase__ ):
classifier(
"Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template=lowerCAmelCase__ , )
self.run_entailment_id(lowerCAmelCase__ )
def _A ( self :List[Any] , lowerCAmelCase__ :Pipeline ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : int = zero_shot_classifier.model.config
snake_case_ : Optional[int] = config.labelaid
snake_case_ : Tuple = zero_shot_classifier.entailment_id
snake_case_ : Optional[Any] = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
self.assertEqual(zero_shot_classifier.entailment_id , -1 )
snake_case_ : Tuple = {"entailment": 0, "neutral": 1, "contradiction": 2}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
snake_case_ : str = {"ENTAIL": 0, "NON-ENTAIL": 1}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
snake_case_ : str = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
self.assertEqual(zero_shot_classifier.entailment_id , 2 )
snake_case_ : List[str] = original_labelaid
self.assertEqual(lowerCAmelCase__ , zero_shot_classifier.entailment_id )
@require_torch
def _A ( self :Tuple ) -> Any:
'''simple docstring'''
snake_case_ : List[Any] = pipeline(
"zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
"Who are you voting for in 2020?" * 100 , candidate_labels=["politics", "public health", "science"] )
@require_torch
def _A ( self :Optional[Any] ) -> Tuple:
'''simple docstring'''
snake_case_ : Union[str, Any] = pipeline(
"zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , )
snake_case_ : int = zero_shot_classifier(
"Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.3_3_3, 0.3_3_3, 0.3_3_3],
} , )
@require_tf
def _A ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
snake_case_ : List[str] = pipeline(
"zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="tf" , )
snake_case_ : Optional[int] = zero_shot_classifier(
"Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.3_3_3, 0.3_3_3, 0.3_3_3],
} , )
@slow
@require_torch
def _A ( self :Union[str, Any] ) -> int:
'''simple docstring'''
snake_case_ : int = pipeline("zero-shot-classification" , model="roberta-large-mnli" , framework="pt" )
snake_case_ : str = zero_shot_classifier(
"Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.9_7_6, 0.0_1_5, 0.0_0_9],
} , )
snake_case_ : Optional[int] = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=lowerCAmelCase__ , )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
@slow
@require_tf
def _A ( self :List[str] ) -> str:
'''simple docstring'''
snake_case_ : int = pipeline("zero-shot-classification" , model="roberta-large-mnli" , framework="tf" )
snake_case_ : Optional[Any] = zero_shot_classifier(
"Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.9_7_6, 0.0_1_5, 0.0_0_9],
} , )
snake_case_ : Tuple = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=lowerCAmelCase__ , )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
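# Hedged sketch of the NLI reduction behind the pipeline above: each candidate
# label is slotted into a hypothesis template, and the model's entailment logit
# for the (premise, hypothesis) pair becomes that label's score (softmaxed over
# labels in single-label mode, sigmoided per label when multi_label=True).
def _zero_shot_hypotheses_sketch():
    template = "This example is {}."
    labels = ["politics", "public health", "science"]
    return [template.format(label) for label in labels]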
| 656 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}


class MegatronBertConfig(PretrainedConfig):
    """Configuration class for Megatron-BERT models."""

    model_type = "megatron-bert"

    def __init__(self, vocab_size=29_056, hidden_size=1_024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4_096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
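# A minimal usage sketch (assuming the config class above is importable as-is;
# the argument values are arbitrary, not recommended defaults):
#
#   config = MegatronBertConfig(hidden_size=512, num_hidden_layers=8)
#   print(config.hidden_size)  # 512
#   config.save_pretrained("./megatron-bert-config")  # writes config.json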
| 656 |
'''simple docstring'''
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''1.0.0a'''):
raise Exception('''requires fairseq >= 1.0.0a''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"
def convert_xlm_roberta_xl_checkpoint_to_pytorch(roberta_checkpoint_path, pytorch_dump_folder_path, classification_head):
    """Copy/paste/tweak the fairseq RoBERTa weights into our XLM-RoBERTa-XL structure."""
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=roberta.cfg.model.encoder_embed_dim,
        num_hidden_layers=roberta.cfg.model.encoder_layers,
        num_attention_heads=roberta.cfg.model.encoder_attention_heads,
        intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
    )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our RoBERTa config:", config)

    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c RoBERTa doesn't use them.
    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]

        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        )
        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias

        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias

        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias

        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias

        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer

    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--roberta_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
args = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
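# Example invocation (hypothetical paths and filename; the flag names match the
# argparse definitions above):
#
#   python convert_xlm_roberta_xl_original_pytorch_checkpoint_to_pytorch.py \
#       --roberta_checkpoint_path /path/to/fairseq_checkpoint_dir \
#       --pytorch_dump_folder_path ./xlm-roberta-xl-converted \
#       --classification_head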
| 656 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
__lowerCamelCase : List[str] = {
"studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
"studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}
class A_ (_lowerCamelCase ):
"""simple docstring"""
a__ = '''luke'''
def __init__( self :Optional[Any] , lowerCAmelCase__ :Union[str, Any]=50_267 , lowerCAmelCase__ :List[Any]=500_000 , lowerCAmelCase__ :int=768 , lowerCAmelCase__ :Optional[Any]=256 , lowerCAmelCase__ :List[Any]=12 , lowerCAmelCase__ :Optional[Any]=12 , lowerCAmelCase__ :List[str]=3_072 , lowerCAmelCase__ :int="gelu" , lowerCAmelCase__ :Union[str, Any]=0.1 , lowerCAmelCase__ :List[str]=0.1 , lowerCAmelCase__ :str=512 , lowerCAmelCase__ :List[str]=2 , lowerCAmelCase__ :Dict=0.0_2 , lowerCAmelCase__ :int=1E-1_2 , lowerCAmelCase__ :Tuple=True , lowerCAmelCase__ :List[str]=None , lowerCAmelCase__ :int=1 , lowerCAmelCase__ :Dict=0 , lowerCAmelCase__ :List[Any]=2 , **lowerCAmelCase__ :Any , ) -> int:
'''simple docstring'''
super().__init__(pad_token_id=A__ , bos_token_id=A__ , eos_token_id=A__ , **A__ )
snake_case_ : Union[str, Any] = vocab_size
snake_case_ : Any = entity_vocab_size
snake_case_ : str = hidden_size
snake_case_ : List[str] = entity_emb_size
snake_case_ : List[str] = num_hidden_layers
snake_case_ : Union[str, Any] = num_attention_heads
snake_case_ : Union[str, Any] = hidden_act
snake_case_ : Dict = intermediate_size
snake_case_ : Tuple = hidden_dropout_prob
snake_case_ : List[str] = attention_probs_dropout_prob
snake_case_ : int = max_position_embeddings
snake_case_ : Dict = type_vocab_size
snake_case_ : Union[str, Any] = initializer_range
snake_case_ : int = layer_norm_eps
snake_case_ : str = use_entity_aware_attention
snake_case_ : Optional[Any] = classifier_dropout
| 700 |
'''simple docstring'''
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def __UpperCAmelCase ( __magic_name__ ,__magic_name__=() ,__magic_name__=None ,__magic_name__="no" ,__magic_name__="29500" )-> Optional[int]:
"""simple docstring"""
snake_case_ : str = False
snake_case_ : int = False
if any(key.startswith("KAGGLE" ) for key in os.environ.keys() ):
snake_case_ : Any = True
elif "IPython" in sys.modules:
snake_case_ : Union[str, Any] = "google.colab" in str(sys.modules["IPython"].get_ipython() )
try:
snake_case_ : Any = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
F'''Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.''' )
if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME" ,__magic_name__ ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
"your training function. Restart your notebook and make sure no cells initializes an "
"`Accelerator`." )
if num_processes is None:
snake_case_ : Tuple = 8
snake_case_ : Optional[int] = PrepareForLaunch(__magic_name__ ,distributed_type="TPU" )
print(F'''Launching a training on {num_processes} TPU cores.''' )
xmp.spawn(__magic_name__ ,args=__magic_name__ ,nprocs=__magic_name__ ,start_method="fork" )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print("Launching training on one GPU." )
else:
print("Launching training on one CPU." )
function(*__magic_name__ )
else:
if num_processes is None:
raise ValueError(
"You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call." )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
"inside your training function. Restart your notebook and make sure no cells initializes an "
"`Accelerator`." )
if torch.cuda.is_initialized():
raise ValueError(
"To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
"using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
"function." )
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
world_size=__magic_name__ ,master_addr="127.0.01" ,master_port=__magic_name__ ,mixed_precision=__magic_name__ ):
snake_case_ : Optional[int] = PrepareForLaunch(__magic_name__ ,distributed_type="MULTI_GPU" )
print(F'''Launching training on {num_processes} GPUs.''' )
try:
start_processes(__magic_name__ ,args=__magic_name__ ,nprocs=__magic_name__ ,start_method="fork" )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
"CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
"This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
"Please review your imports and test them when running the `notebook_launcher()` to identify "
"which one is problematic." ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
snake_case_ : Any = "1"
print("Launching training on MPS." )
elif torch.cuda.is_available():
print("Launching training on one GPU." )
else:
print("Launching training on CPU." )
function(*__magic_name__ )
def debug_launcher(function, args=(), num_processes=2):
    """Launch a training function using several processes on CPU for debugging purposes."""
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variables to be here. We set the ones common to each
        # process here (the other ones will be set by the launcher).
        with patch_environment(
            world_size=num_processes,
            master_addr="127.0.0.1",
            master_port="29500",
            accelerate_mixed_precision="no",
            accelerate_debug_rdv_file=tmp_file.name,
            accelerate_use_cpu="yes",
        ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
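# A minimal usage sketch (assuming this module is accelerate's `launchers.py`, so the
# public entry point is `accelerate.notebook_launcher`; the training function below is
# a placeholder):
#
#   from accelerate import Accelerator, notebook_launcher
#
#   def training_function():
#       accelerator = Accelerator()  # must be created *inside* the function (see the checks above)
#       accelerator.print("hello from each process")
#
#   notebook_launcher(training_function, args=(), num_processes=2)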
| 656 | 0 |
'''simple docstring'''
def xnor_gate(input_a: int, input_b: int) -> int:
    """Return 1 if both inputs are equal (logical XNOR), otherwise 0."""
    return 1 if input_a == input_b else 0


def test_xnor_gate() -> None:
    """Exercise every row of the XNOR truth table."""
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
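# Truth table for XNOR (the complement of XOR): the output is 1 exactly when the two
# inputs agree. An equivalent one-liner would be `int(not (input_a ^ input_b))`.
#
#   input_a | input_b | output
#      0    |    0    |   1
#      0    |    1    |   0
#      1    |    0    |   0
#      1    |    1    |   1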
| 701 |
'''simple docstring'''
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph:
"""simple docstring"""
    def __init__(self):
        '''Adjacency map: node -> list of [weight, neighbor] pairs.'''
        self.graph = {}
    def add_pair(self, u, v, w=1):
        '''Add a weighted edge u -> v (weight defaults to 1).'''
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v):
            self.graph[v] = []
def _A ( self :List[Any] ) -> Optional[int]:
'''simple docstring'''
return list(self.graph )
def _A ( self :str , lowerCAmelCase__ :Any , lowerCAmelCase__ :int ) -> List[Any]:
'''simple docstring'''
if self.graph.get(lowerCAmelCase__ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowerCAmelCase__ )
    def dfs(self, s=-2, d=-1):
        '''Iterative depth-first search from s; stops early if d is found.'''
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        while True:
            # check if there are any non-isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return visited
    def fill_graph(self, c=-1):
        '''Fill the graph with up to c random vertices.'''
        if c == -1:
            c = floor(random() * 10_000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)
    def bfs(self, s=-2):
        '''Breadth-first traversal starting from s; returns the visit order.'''
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited
def _A ( self :List[str] , lowerCAmelCase__ :str ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Tuple = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def _A ( self :Any , lowerCAmelCase__ :int ) -> Optional[Any]:
'''simple docstring'''
return len(self.graph[u] )
def _A ( self :Tuple , lowerCAmelCase__ :List[str]=-2 ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : str = []
snake_case_ : str = []
if s == -2:
snake_case_ : Optional[Any] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : int = s
snake_case_ : Optional[int] = []
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : List[Any] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : List[str] = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(lowerCAmelCase__ ) != 0:
snake_case_ : int = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Union[str, Any] = ss
# check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return sorted_nodes
def _A ( self :Dict ) -> Any:
'''simple docstring'''
snake_case_ : Dict = []
snake_case_ : Any = []
snake_case_ : str = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Optional[int] = -2
snake_case_ : Any = []
snake_case_ : List[Any] = s
snake_case_ : int = False
snake_case_ : Optional[int] = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : List[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case_ : Any = len(lowerCAmelCase__ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : Optional[int] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ : Optional[Any] = True
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Optional[Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : str = False
indirect_parents.append(lowerCAmelCase__ )
snake_case_ : List[str] = s
snake_case_ : Optional[int] = ss
# check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return list(lowerCAmelCase__ )
def _A ( self :Tuple ) -> List[str]:
'''simple docstring'''
snake_case_ : List[Any] = []
snake_case_ : Tuple = []
snake_case_ : List[str] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : str = -2
snake_case_ : List[str] = []
snake_case_ : List[Any] = s
snake_case_ : List[str] = False
snake_case_ : Dict = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : List[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case_ : Any = len(lowerCAmelCase__ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ : Tuple = True
if len(lowerCAmelCase__ ) != 0:
snake_case_ : List[Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Optional[int] = False
indirect_parents.append(lowerCAmelCase__ )
snake_case_ : int = s
snake_case_ : Union[str, Any] = ss
# check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return False
    def dfs_time(self, s=-2, e=-1):
        '''Return the elapsed wall-clock time of a DFS from s to e.'''
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin
    def bfs_time(self, s=-2):
        '''Return the elapsed wall-clock time of a BFS from s.'''
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
class Graph:
"""simple docstring"""
    def __init__(self):
        '''Adjacency map: node -> list of [weight, neighbor] pairs (undirected).'''
        self.graph = {}
    def add_pair(self, u, v, w=1):
        '''Add an undirected weighted edge between u and v.'''
        if self.graph.get(u):
            # if there already is an edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            # if there already is an edge
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if v does not exist
            self.graph[v] = [[w, u]]
def _A ( self :Dict , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
if self.graph.get(lowerCAmelCase__ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowerCAmelCase__ )
# the other way round
if self.graph.get(lowerCAmelCase__ ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(lowerCAmelCase__ )
def _A ( self :Optional[Any] , lowerCAmelCase__ :Optional[Any]=-2 , lowerCAmelCase__ :Optional[int]=-1 ) -> int:
'''simple docstring'''
if s == d:
return []
snake_case_ : Any = []
snake_case_ : Dict = []
if s == -2:
snake_case_ : Optional[int] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Tuple = s
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : List[str] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowerCAmelCase__ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Optional[Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : str = ss
# check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return visited
def _A ( self :Optional[int] , lowerCAmelCase__ :str=-1 ) -> List[Any]:
'''simple docstring'''
if c == -1:
snake_case_ : Optional[int] = floor(random() * 10_000 ) + 10
for i in range(lowerCAmelCase__ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
snake_case_ : str = floor(random() * c ) + 1
if n != i:
self.add_pair(lowerCAmelCase__ , lowerCAmelCase__ , 1 )
def _A ( self :Any , lowerCAmelCase__ :Optional[Any]=-2 ) -> List[Any]:
'''simple docstring'''
snake_case_ : List[str] = deque()
snake_case_ : Optional[Any] = []
if s == -2:
snake_case_ : List[Any] = list(self.graph )[0]
d.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
while d:
snake_case_ : Optional[int] = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _A ( self :str , lowerCAmelCase__ :Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
return len(self.graph[u] )
def _A ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
snake_case_ : Any = []
snake_case_ : Optional[Any] = []
snake_case_ : Optional[Any] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Tuple = -2
snake_case_ : Optional[int] = []
snake_case_ : Tuple = s
snake_case_ : Optional[Any] = False
snake_case_ : Optional[int] = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : Optional[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case_ : Optional[int] = len(lowerCAmelCase__ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : Tuple = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ : Optional[int] = True
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Optional[Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Optional[int] = False
indirect_parents.append(lowerCAmelCase__ )
snake_case_ : List[Any] = s
snake_case_ : Dict = ss
# check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return list(lowerCAmelCase__ )
def _A ( self :Optional[Any] ) -> Tuple:
'''simple docstring'''
snake_case_ : Optional[Any] = []
snake_case_ : int = []
snake_case_ : List[str] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Tuple = -2
snake_case_ : int = []
snake_case_ : int = s
snake_case_ : Optional[Any] = False
snake_case_ : List[Any] = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : Union[str, Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case_ : Tuple = len(lowerCAmelCase__ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : Optional[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ : Optional[Any] = True
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Tuple = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Optional[int] = False
indirect_parents.append(lowerCAmelCase__ )
snake_case_ : Union[str, Any] = s
snake_case_ : Tuple = ss
# check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return False
def _A ( self :Any ) -> Tuple:
'''simple docstring'''
return list(self.graph )
def _A ( self :Optional[Any] , lowerCAmelCase__ :Tuple=-2 , lowerCAmelCase__ :Optional[int]=-1 ) -> str:
'''simple docstring'''
snake_case_ : List[str] = time()
self.dfs(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : List[Any] = time()
return end - begin
def _A ( self :Union[str, Any] , lowerCAmelCase__ :List[Any]=-2 ) -> int:
'''simple docstring'''
snake_case_ : List[str] = time()
self.bfs(lowerCAmelCase__ )
snake_case_ : Tuple = time()
return end - begin
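# A minimal usage sketch for the DirectedGraph class above (method names follow the
# renames applied here; other helpers in this dump still carry placeholder names):
#
#   g = DirectedGraph()
#   g.add_pair(0, 1)
#   g.add_pair(1, 2)
#   print(g.dfs(0, 2))  # depth-first path from 0 to 2, e.g. [0, 1, 2]
#   print(g.bfs(0))     # breadth-first visit order from 0, e.g. [0, 1, 2]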
| 656 | 0 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(train_file, eval_file, test_file, tokenizer: PreTrainedTokenizer, label_column_id, max_seq_length=None):
    """Build TensorFlow train/validation/test datasets from CSV files."""
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: Optional[str] = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )
    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")
            results.update(result)

    return results
if __name__ == "__main__":
main()
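# Example invocation (hypothetical CSV paths and filename; the flag names come from the
# dataclasses above plus TFTrainingArguments):
#
#   python run_tf_text_classification.py \
#       --train_file train.csv --dev_file dev.csv --label_column_id 0 \
#       --model_name_or_path bert-base-uncased --output_dir ./text-clf-out \
#       --do_train --do_eval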
| 702 |
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r"\s+")


def get_hash(example):
    """Get the hash of a code example (whitespace-normalized)."""
    return {"hash": hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()}


def line_stats(example):
    """Compute mean and max line length of a file."""
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}


def alpha_stats(example):
    """Compute the fraction of alphanumeric characters in a file."""
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}


def check_uniques(example, uniques):
    """Check if the current hash is still in the set of unique hashes and remove it if so."""
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False


def is_autogenerated(example, scan_width=5):
    """Check if a file is autogenerated by looking at the first few lines."""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}


def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Check if a file is a configuration file or a unit test."""
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test: look for the keywords in the first few lines
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test: count occurrences of "config" and "test" against a threshold
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}


def has_no_keywords(example):
    """Check if a file contains none of the keywords that hint at plain Python code."""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}


def has_few_assignments(example, minimum=4):
    """Check if a file uses the assignment operator less than `minimum` times."""
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}


def char_token_ratio(example):
    """Compute character/token ratio of the file with the tokenizer."""
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}


def preprocess(example):
    """Chain all preprocessing steps into one function so the cache is not filled."""
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results


def filter(example, uniques, args):  # noqa: A001 - intentionally shadows the builtin, passed to `ds.filter` below
    """Filter the dataset with heuristics; an example is dropped when any test fails."""
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True


def compress_file(file_path):
    """Compress a file with gzip."""
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Time to load dataset: {time.time()-t_start:.2f}")

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")

# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(f"Size of deduplicated dataset: {len(ds_filter)}")

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place to save it
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)

t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"Time to save dataset: {time.time()-t_start:.2f}")
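# Example invocation (hypothetical values and filename; the flag names are inferred
# from the `args.` attribute accesses above and are defined in PreprocessingArguments):
#
#   python preprocessing.py \
#       --dataset_name transformersbook/codeparrot \
#       --tokenizer_dir gpt2 \
#       --output_dir ./codeparrot-clean \
#       --near_deduplication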
| 656 | 0 |
'''simple docstring'''
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def __UpperCAmelCase ( __magic_name__ )-> Any:
"""simple docstring"""
if isinstance(__magic_name__ , collections.abc.Iterable ):
return x
return (x, x)
@require_flax
class A_ :
"""simple docstring"""
def _A ( self :Tuple , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Optional[int] ) -> Optional[Any]:
'''simple docstring'''
pass
def _A ( self :List[str] ) -> Optional[int]:
'''simple docstring'''
pass
def _A ( self :int ) -> Dict:
'''simple docstring'''
pass
def _A ( self :Any , lowerCAmelCase__ :int , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Dict ) -> int:
'''simple docstring'''
snake_case_ : Optional[int] = np.abs((a - b) ).max()
self.assertLessEqual(A_ , A_ , F'''Difference between torch and flax is {diff} (>= {tol}).''' )
def _A ( self :Tuple , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Any=None , **lowerCAmelCase__ :Union[str, Any] ) -> List[Any]:
'''simple docstring'''
snake_case_ : List[str] = VisionTextDualEncoderConfig.from_vision_text_configs(A_ , A_ )
snake_case_ : Optional[int] = FlaxVisionTextDualEncoderModel(A_ )
snake_case_ : int = model(input_ids=A_ , pixel_values=A_ , attention_mask=A_ )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], config.projection_dim) )
def _A ( self :Dict , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :List[Any]=None , **lowerCAmelCase__ :List[Any] ) -> Dict:
'''simple docstring'''
snake_case_, snake_case_ : Optional[Any] = self.get_vision_text_model(A_ , A_ )
snake_case_ : List[str] = {"vision_model": vision_model, "text_model": text_model}
snake_case_ : Union[str, Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**A_ )
snake_case_ : List[Any] = model(input_ids=A_ , pixel_values=A_ , attention_mask=A_ )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) )
def _A ( self :int , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Any , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Dict=None , **lowerCAmelCase__ :Union[str, Any] ) -> List[Any]:
'''simple docstring'''
snake_case_, snake_case_ : List[Any] = self.get_vision_text_model(A_ , A_ )
snake_case_ : Union[str, Any] = {"vision_model": vision_model, "text_model": text_model}
snake_case_ : Dict = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**A_ )
snake_case_ : Any = model(input_ids=A_ , pixel_values=A_ , attention_mask=A_ )
snake_case_ : int = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(A_ )
snake_case_ : Dict = FlaxVisionTextDualEncoderModel.from_pretrained(A_ )
snake_case_ : Dict = model(input_ids=A_ , pixel_values=A_ , attention_mask=A_ )
snake_case_ : Optional[Any] = after_output[0]
snake_case_ : List[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(A_ , 1E-3 )
def _A ( self :Dict , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Any , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :int=None , **lowerCAmelCase__ :List[str] ) -> Dict:
'''simple docstring'''
snake_case_, snake_case_ : List[str] = self.get_vision_text_model(A_ , A_ )
snake_case_ : List[str] = {"vision_model": vision_model, "text_model": text_model}
snake_case_ : Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**A_ )
snake_case_ : Union[str, Any] = model(
input_ids=A_ , pixel_values=A_ , attention_mask=A_ , output_attentions=A_ )
snake_case_ : List[str] = output.vision_model_output.attentions
self.assertEqual(len(A_ ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
snake_case_ : List[Any] = to_atuple(vision_model.config.image_size )
snake_case_ : str = to_atuple(vision_model.config.patch_size )
snake_case_ : Optional[int] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
snake_case_ : str = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
snake_case_ : Optional[int] = output.text_model_output.attentions
self.assertEqual(len(A_ ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def _A ( self :Union[str, Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[Any] ) -> List[str]:
'''simple docstring'''
pt_model.to(A_ )
pt_model.eval()
# prepare inputs
snake_case_ : str = inputs_dict
snake_case_ : Tuple = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
snake_case_ : Tuple = pt_model(**A_ ).to_tuple()
snake_case_ : List[str] = fx_model(**A_ ).to_tuple()
self.assertEqual(len(A_ ) , len(A_ ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(A_ , pt_output.numpy() , 4E-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(A_ )
snake_case_ : int = FlaxVisionTextDualEncoderModel.from_pretrained(A_ , from_pt=A_ )
snake_case_ : str = fx_model_loaded(**A_ ).to_tuple()
self.assertEqual(len(A_ ) , len(A_ ) , "Output lengths differ between Flax and PyTorch" )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(A_ , pt_output.numpy() , 4E-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(A_ )
snake_case_ : Tuple = VisionTextDualEncoderModel.from_pretrained(A_ , from_flax=A_ )
pt_model_loaded.to(A_ )
pt_model_loaded.eval()
with torch.no_grad():
snake_case_ : List[str] = pt_model_loaded(**A_ ).to_tuple()
self.assertEqual(len(A_ ) , len(A_ ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(A_ , pt_output_loaded.numpy() , 4E-2 )
def _A ( self :Optional[int] , lowerCAmelCase__ :Any , lowerCAmelCase__ :str , lowerCAmelCase__ :List[Any] ) -> int:
'''simple docstring'''
snake_case_ : List[Any] = VisionTextDualEncoderConfig.from_vision_text_configs(A_ , A_ )
snake_case_ : int = VisionTextDualEncoderModel(A_ )
snake_case_ : Any = FlaxVisionTextDualEncoderModel(A_ )
snake_case_ : List[Any] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , A_ )
snake_case_ : List[Any] = fx_state
self.check_pt_flax_equivalence(A_ , A_ , A_ )
def _A ( self :int , lowerCAmelCase__ :Any , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Optional[int] ) -> List[Any]:
'''simple docstring'''
snake_case_ : int = VisionTextDualEncoderConfig.from_vision_text_configs(A_ , A_ )
snake_case_ : Tuple = VisionTextDualEncoderModel(A_ )
snake_case_ : Optional[Any] = FlaxVisionTextDualEncoderModel(A_ )
snake_case_ : int = load_flax_weights_in_pytorch_model(A_ , fx_model.params )
self.check_pt_flax_equivalence(A_ , A_ , A_ )
def _A ( self :Tuple ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : List[str] = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**A_ )
def _A ( self :str ) -> Dict:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**A_ )
def _A ( self :Tuple ) -> int:
'''simple docstring'''
snake_case_ : str = self.prepare_config_and_inputs()
self.check_save_load(**A_ )
def _A ( self :int ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Dict = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**A_ )
@is_pt_flax_cross_test
def _A ( self :Tuple ) -> str:
'''simple docstring'''
snake_case_ : Any = self.prepare_config_and_inputs()
snake_case_ : Tuple = config_inputs_dict.pop("vision_config" )
snake_case_ : Tuple = config_inputs_dict.pop("text_config" )
snake_case_ : List[Any] = config_inputs_dict
self.check_equivalence_pt_to_flax(A_ , A_ , A_ )
self.check_equivalence_flax_to_pt(A_ , A_ , A_ )
@slow
def _A ( self :List[str] ) -> Optional[Any]:
'''simple docstring'''
snake_case_, snake_case_ : Tuple = self.get_pretrained_model_and_inputs()
snake_case_ : Dict = model_a(**A_ )
snake_case_ : int = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(A_ )
snake_case_ : Any = FlaxVisionTextDualEncoderModel.from_pretrained(A_ )
snake_case_ : Dict = model_a(**A_ )
snake_case_ : List[str] = after_outputs[0]
snake_case_ : Any = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(A_ , 1E-5 )
@require_flax
class A_ (_SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
def _A ( self :Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Tuple = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-vit" , "hf-internal-testing/tiny-bert" , vision_from_pt=A_ , text_from_pt=A_ , )
snake_case_ : int = 13
snake_case_ : Any = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
snake_case_ : Optional[int] = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
snake_case_ : Union[str, Any] = random_attention_mask([batch_size, 4] )
snake_case_ : List[str] = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def _A ( self :Any , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Optional[Any] ) -> List[str]:
'''simple docstring'''
snake_case_ : int = FlaxViTModel(A_ )
snake_case_ : Any = FlaxBertModel(A_ )
return vision_model, text_model
def _A ( self :Tuple ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[int] = FlaxViTModelTester(self )
snake_case_ : Any = FlaxBertModelTester(self )
snake_case_ : Dict = vit_model_tester.prepare_config_and_inputs()
snake_case_ : Any = bert_model_tester.prepare_config_and_inputs()
snake_case_, snake_case_ : Any = vision_config_and_inputs
snake_case_, snake_case_, snake_case_, snake_case_ : Optional[int] = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class A_ (_SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
def _A ( self :Optional[Any] ) -> Tuple:
'''simple docstring'''
snake_case_ : List[str] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-clip" , "hf-internal-testing/tiny-bert" , vision_from_pt=A_ , text_from_pt=A_ , )
snake_case_ : List[Any] = 13
snake_case_ : int = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
snake_case_ : Tuple = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
snake_case_ : str = random_attention_mask([batch_size, 4] )
snake_case_ : List[Any] = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def _A ( self :int , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :str ) -> int:
'''simple docstring'''
snake_case_ : Dict = FlaxCLIPVisionModel(A_ )
snake_case_ : List[Any] = FlaxBertModel(A_ )
return vision_model, text_model
    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class FlaxVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0])
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
| 703 |
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
| 656 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlnet"] = [
'''XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLNetForMultipleChoice''',
'''XLNetForQuestionAnswering''',
'''XLNetForQuestionAnsweringSimple''',
'''XLNetForSequenceClassification''',
'''XLNetForTokenClassification''',
'''XLNetLMHeadModel''',
'''XLNetModel''',
'''XLNetPreTrainedModel''',
'''load_tf_weights_in_xlnet''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlnet"] = [
'''TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLNetForMultipleChoice''',
'''TFXLNetForQuestionAnsweringSimple''',
'''TFXLNetForSequenceClassification''',
'''TFXLNetForTokenClassification''',
'''TFXLNetLMHeadModel''',
'''TFXLNetMainLayer''',
'''TFXLNetModel''',
'''TFXLNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
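
# The `_import_structure` mapping above lets `_LazyModule` defer the heavy torch/TF
# imports until a symbol is actually touched. A minimal sketch of that mechanism
# (simplified illustration, not the real `_LazyModule` implementation):
#
#     import importlib
#     import types
#
#     class MiniLazyModule(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             # attribute name -> submodule that defines it
#             self._lookup = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}
#
#         def __getattr__(self, attr):
#             module = importlib.import_module(f".{self._lookup[attr]}", self.__name__)
#             value = getattr(module, attr)
#             setattr(self, attr, value)  # cache so the import runs only once per name
#             return value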
| 704 |
'''simple docstring'''
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"


# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
'''DecisionTransformerConfig''',
'''EncoderDecoderConfig''',
'''MusicgenConfig''',
'''RagConfig''',
'''SpeechEncoderDecoderConfig''',
'''TimmBackboneConfig''',
'''VisionEncoderDecoderConfig''',
'''VisionTextDualEncoderConfig''',
'''LlamaConfig''',
}
def get_checkpoint_from_config_class(config_class):
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint
def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)
        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
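
# For reference, `_re_checkpoint` captures (name, link) pairs from markdown-style
# links in config docstrings:
#
#     >>> _re_checkpoint.findall("[bert-base-uncased](https://huggingface.co/bert-base-uncased)")
#     [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]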
| 656 | 0 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_openai_gpt_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
{
'''feature-extraction''': OpenAIGPTModel,
'''text-classification''': OpenAIGPTForSequenceClassification,
'''text-generation''': OpenAIGPTLMHeadModel,
'''zero-shot''': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)

        input_ids = torch.tensor([[481, 4_735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
481,
4_735,
544,
246,
963,
870,
762,
239,
244,
40_477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 705 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class CvtConfig(PretrainedConfig):
    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
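
# Minimal usage sketch (the `microsoft/cvt-13` checkpoint in the archive map above
# ships a matching config; values below are illustrative):
#
#     from transformers import CvtConfig, CvtModel
#
#     config = CvtConfig(embed_dim=[64, 192, 384], num_heads=[1, 3, 6])
#     model = CvtModel(config)  # randomly initialised CvT with this architecture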
| 656 | 0 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
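
# Sketch of how `align_with_features` is used (label names below are illustrative):
#
#     from datasets import ClassLabel, Features, Value
#
#     task = TextClassification(text_column="text", label_column="labels")
#     features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
#     task = task.align_with_features(features)  # label_schema now carries the real ClassLabel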
| 706 |
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_CITATION = '''\
@inproceedings{snover-etal-2006-study,
title = "A Study of Translation Edit Rate with Targeted Human Annotation",
author = "Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John",
booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
month = aug # " 8-12",
year = "2006",
address = "Cambridge, Massachusetts, USA",
publisher = "Association for Machine Translation in the Americas",
url = "https://aclanthology.org/2006.amta-papers.25",
pages = "223--231",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
_DESCRIPTION = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''
_KWARGS_DESCRIPTION = '''
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
\'score\' (float): TER score (num_edits / sum_ref_lengths * 100)
\'num_edits\' (int): The cumulative number of edits
\'ref_length\' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}
Example 2:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}
Example 3:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}
Example 4:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}
Example 5:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Ter(datasets.Metric):
    def _info(self):
'''simple docstring'''
if version.parse(scb.__version__ ) < version.parse("1.4.12" ):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
"You can install it with `pip install \"sacrebleu>=1.4.12\"`." )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="http://www.cs.umd.edu/~snover/tercom/" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"] , reference_urls=[
"https://github.com/jhclark/tercom",
] , )
    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)

        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
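
# Note on the reference layout: `_compute` receives one list of references per
# prediction and transposes it, because sacrebleu's `corpus_score` expects one
# stream of i-th references across all predictions. For example:
#
#     references             = [["r1a", "r1b"], ["r2a", "r2b"]]
#     transformed_references = [["r1a", "r2a"], ["r1b", "r2b"]]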
| 656 | 0 |
'''simple docstring'''
def encrypt(input_string: str, key: int) -> str:
    """Shuffles the characters of a string by placing each of them in a grid
    (whose height depends on the key) in a zigzag formation and reading it
    left to right, row by row.
    """
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string

    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)

    return output_string
def decrypt(input_string: str, key: int) -> str:
    """Generates a zigzag template based on the key, fills it in with the
    characters of the input string row by row, then reads it off in zigzag order.
    """
    grid = []
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string

    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")

    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)

    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string
def bruteforce(input_string: str) -> dict[int, str]:
    """Uses the decrypt function by guessing every key."""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results
if __name__ == "__main__":
import doctest
doctest.testmod()
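
# Worked example (key = 4): the characters of "Hello, World!" are written along a
# 4-row zigzag and read off row by row, so encryption and decryption invert each other:
#
#     >>> encrypt("Hello, World!", 4)
#     'H !e,Wdloollr'
#     >>> decrypt("H !e,Wdloollr", 4)
#     'Hello, World!'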
| 707 |
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset():
    data_dict = {
"repo_name": ["test_repo1", "test_repo2", "test_repo3"],
"path": ["test_1.py", "test_2.py", "unit_test.py"],
"content": ["a " * 20, "a " * 30, "b " * 7],
}
    dataset = Dataset.from_dict(data_dict)
    return dataset
class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
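
# The 0.85 passed to `make_duplicate_clusters` above is the MinHash/Jaccard
# similarity threshold: "a " * 20 and "a " * 30 produce near-identical token sets
# and fall into one duplicate cluster, while "b " * 7 stays out of it.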
| 656 | 0 |
'''simple docstring'''
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)
def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]
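
# Worked example: a 100x50 px box on a 400x200 px page is mapped onto the 0-1000
# coordinate grid that LayoutLM-style models expect:
#
#     >>> normalize_box([40, 20, 140, 70], width=400, height=200)
#     [100, 100, 350, 350]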
def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str]):
    """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes."""
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class LayoutLMv3ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_value: float = 1 / 255,
        do_normalize: bool = True,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(self, image: np.ndarray, mean: Union[float, Iterable[float]], std: Union[float, Iterable[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("If do_normalize is True, image_mean and image_std must be specified.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
return data
| 708 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
    "configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
    "tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
'''GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXJapaneseForCausalLM''',
'''GPTNeoXJapaneseLayer''',
'''GPTNeoXJapaneseModel''',
'''GPTNeoXJapanesePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 656 | 0 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs):
        return super().__call__(images, **kwargs)
    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        return preprocess_params, {}, {}
    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs
    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs
    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
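
# Typical entry point is the `pipeline` factory rather than instantiating this
# class directly (the checkpoint below is illustrative):
#
#     from transformers import pipeline
#
#     classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#     classifier("cat.png", candidate_labels=["cat", "dog"], hypothesis_template="a photo of a {}.")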
| 709 |
'''simple docstring'''
def hexagonal_numbers(length: int) -> list[int]:
    """
    :param length: max number of elements
    :return: the first `length` hexagonal numbers as a list
    """
    if length <= 0 or not isinstance(length, int):
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
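
# The closed form n * (2n - 1) yields 0, 1, 6, 15, 28, ...; e.g. for n = 3:
# 3 * (2 * 3 - 1) = 3 * 5 = 15, which is hexagonal_numbers(5)[3] above.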
| 656 | 0 |
'''simple docstring'''
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
'''stable diffusion controlnet''',
'''0.22.0''',
'''Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.''',
standard_warn=False,
stacklevel=3,
)
| 710 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) ,)
    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser
def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")
def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
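
# Through the accelerate CLI this module backs the `accelerate test` subcommand,
# which launches a small end-to-end script to validate a saved config, e.g.:
#
#     accelerate test --config_file path/to/default_config.yaml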
| 656 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_efficientformer": [
        "EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientFormerConfig",
    ]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientformer"] = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientformer"] = [
'EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'EfficientFormerForImageClassification',
'EfficientFormerForImageClassificationWithTeacher',
'EfficientFormerModel',
'EfficientFormerPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_efficientformer"] = [
'TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFEfficientFormerForImageClassification',
'TFEfficientFormerForImageClassificationWithTeacher',
'TFEfficientFormerModel',
'TFEfficientFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 711 |
'''simple docstring'''
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
_CITATION = r'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Spearmanr(datasets.Metric):
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
} ) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"] , )
    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
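
# Spearman is Pearson correlation computed on ranks. In the first docstring example,
# references [1, 2, 3, 4, 5] rank as [1, 2, 3, 4, 5] and predictions
# [10, 9, 2.5, 6, 4] rank as [5, 4, 1, 3, 2]; correlating the two rank vectors
# gives the -0.7 reported above.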
| 656 | 0 |
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece_bpe_char.model''')
@require_sentencepiece
@require_tokenizers
class A_ (UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
a__ = SpeechTaTokenizer
a__ = False
a__ = True
def _A ( self :Optional[Any] ) -> Dict:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = SpeechTaTokenizer(SAMPLE_VOCAB )
        mask_token = AddedToken("<mask>" , lstrip=True , rstrip=False )
        tokenizer.mask_token = mask_token
tokenizer.add_special_tokens({"mask_token": mask_token} )
tokenizer.add_tokens(["<ctc_blank>"] )
tokenizer.save_pretrained(self.tmpdirname )
def _A ( self :Optional[Any] , lowerCAmelCase__ :Dict ) -> str:
'''simple docstring'''
snake_case_ : Union[str, Any] = """this is a test"""
snake_case_ : List[Any] = """this is a test"""
return input_text, output_text
def _A ( self :Tuple , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :int=False , lowerCAmelCase__ :str=20 , lowerCAmelCase__ :Optional[int]=5 ) -> int:
'''simple docstring'''
snake_case_ : List[Any] = self.get_input_output_texts(_a )
snake_case_ : List[str] = tokenizer.encode(_a , add_special_tokens=_a )
snake_case_ : str = tokenizer.decode(_a , clean_up_tokenization_spaces=_a )
return text, ids
def _A ( self :str ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : List[Any] = """<pad>"""
snake_case_ : List[str] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) , _a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) , _a )
def _A ( self :List[Any] ) -> Tuple:
'''simple docstring'''
snake_case_ : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-4] , "œ" )
self.assertEqual(vocab_keys[-2] , "<mask>" )
self.assertEqual(vocab_keys[-1] , "<ctc_blank>" )
self.assertEqual(len(_a ) , 81 )
def _A ( self :Optional[Any] ) -> Tuple:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 79 )
def _A ( self :List[str] ) -> Dict:
'''simple docstring'''
snake_case_ : Optional[Any] = self.get_tokenizers(do_lower_case=_a )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
snake_case_ : int = tokenizer.vocab_size
snake_case_ : str = len(_a )
self.assertNotEqual(_a , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
snake_case_ : List[str] = ["""aaaaa bbbbbb""", """cccccccccdddddddd"""]
snake_case_ : List[str] = tokenizer.add_tokens(_a )
snake_case_ : List[str] = tokenizer.vocab_size
snake_case_ : Tuple = len(_a )
self.assertNotEqual(_a , 0 )
self.assertEqual(_a , _a )
self.assertEqual(_a , len(_a ) )
self.assertEqual(_a , all_size + len(_a ) )
snake_case_ : Optional[Any] = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l" , add_special_tokens=_a )
self.assertGreaterEqual(len(_a ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
snake_case_ : Tuple = {"""eos_token""": """>>>>|||<||<<|<<""", """pad_token""": """<<<<<|||>|>>>>|>"""}
snake_case_ : Union[str, Any] = tokenizer.add_special_tokens(_a )
snake_case_ : Optional[Any] = tokenizer.vocab_size
snake_case_ : Tuple = len(_a )
self.assertNotEqual(_a , 0 )
self.assertEqual(_a , _a )
self.assertEqual(_a , len(_a ) )
self.assertEqual(_a , all_size_a + len(_a ) )
snake_case_ : List[Any] = tokenizer.encode(
">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l" , add_special_tokens=_a )
self.assertGreaterEqual(len(_a ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
def _A ( self :Optional[Any] ) -> List[str]:
'''simple docstring'''
pass
def _A ( self :Tuple ) -> Dict:
'''simple docstring'''
pass
def _A ( self :str ) -> List[str]:
'''simple docstring'''
snake_case_ : Dict = self.get_tokenizer()
snake_case_ : str = tokenizer.tokenize("This is a test" )
# fmt: off
self.assertListEqual(_a , [SPIECE_UNDERLINE, "T", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "a", SPIECE_UNDERLINE, "t", "e", "s", "t"] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_a ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
snake_case_ : List[Any] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
_a , [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "92000", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] )
snake_case_ : str = tokenizer.convert_tokens_to_ids(_a )
# fmt: off
self.assertListEqual(_a , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
snake_case_ : Optional[int] = tokenizer.convert_ids_to_tokens(_a )
self.assertListEqual(
_a , [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "<unk>", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] )
@slow
def _A ( self :Tuple ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Tuple = [
"""Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides """
"""general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural """
"""Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained """
"""models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.""",
"""BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly """
"""conditioning on both left and right context in all layers.""",
"""The quick brown fox jumps over the lazy dog.""",
]
# fmt: off
snake_case_ : int = {
"""input_ids""": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"""attention_mask""": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name="microsoft/speecht5_asr" , revision="c5ef64c71905caeccde0e4462ef3f9077224c524" , sequences=_a , )
| 712 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
    SAMPLE_SP = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
EN_CODE = 128022
FR_CODE = 128028
@require_sentencepiece
class A_ (TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
a__ = MaMaaaTokenizer
a__ = False
a__ = False
a__ = True
def _A ( self :Union[str, Any] ) -> List[str]:
'''simple docstring'''
super().setUp()
snake_case_ : int = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
snake_case_ : Any = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
snake_case_ : Optional[int] = Path(self.tmpdirname )
save_json(lowerCAmelCase__ , save_dir / VOCAB_FILES_NAMES["vocab_file"] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(lowerCAmelCase__ , save_dir / VOCAB_FILES_NAMES["spm_file"] )
snake_case_ : Union[str, Any] = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def _A ( self :List[Any] , **lowerCAmelCase__ :List[Any] ) -> str:
'''simple docstring'''
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def _A ( self :Optional[int] , lowerCAmelCase__ :Any ) -> Optional[int]:
'''simple docstring'''
return (
"This is a test",
"This is a test",
)
def _A ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : str = "</s>"
snake_case_ : Union[str, Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__ ) , lowerCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__ ) , lowerCAmelCase__ )
def _A ( self :Union[str, Any] ) -> List[str]:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.get_tokenizer()
snake_case_ : Any = list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "</s>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "<s>" )
self.assertEqual(len(lowerCAmelCase__ ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip("Skip this test while all models are still to be uploaded." )
def _A ( self :List[Any] ) -> Union[str, Any]:
'''simple docstring'''
pass
def _A ( self :Optional[int] ) -> int:
'''simple docstring'''
snake_case_ : int = self.get_tokenizer()
snake_case_ : List[str] = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowerCAmelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [2, 3, 4, 5, 6] , )
snake_case_ : Any = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(lowerCAmelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
snake_case_ : Any = tokenizer.convert_tokens_to_string(lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , "This is a test" )
@slow
def _A ( self :Any ) -> List[Any]:
        '''simple docstring'''
        # fmt: off
snake_case_ : int = {"input_ids": [[128_022, 110_108, 397, 11, 38_272, 2_247, 124_811, 285, 18_105, 1_586, 207, 7, 39_534, 4_428, 397, 1_019, 18_105, 1_586, 207, 7, 41_337, 16_786, 241, 7, 20_214, 17, 125_690, 10_398, 7, 44_378, 58_069, 68_342, 7_798, 7_343, 11, 299, 33_310, 4, 158, 37_350, 94_077, 4_569, 299, 33_310, 90, 4, 52_840, 290, 4, 31_270, 112, 299, 682, 4, 52_840, 39_953, 14_079, 193, 52_519, 90_894, 17_894, 120_697, 11, 40_445, 551, 17, 1_019, 52_519, 90_894, 17_756, 963, 11, 40_445, 480, 17, 9_792, 1_120, 5_173, 1_393, 6_240, 16_786, 241, 120_996, 28, 1_245, 1_393, 118_240, 11_123, 1_019, 93_612, 2_691, 10_618, 98_058, 120_409, 1_928, 279, 4, 40_683, 367, 178, 207, 1_019, 103, 103_121, 506, 65_296, 5, 2], [128_022, 21_217, 367, 117, 125_450, 128, 719, 7, 7_308, 40, 93_612, 12_669, 1_116, 16_704, 71, 17_785, 3_699, 15_592, 35, 144, 9_584, 241, 11_943, 713, 950, 799, 2_247, 88_427, 150, 149, 118_813, 120_706, 1_019, 106_906, 81_518, 28, 1_224, 22_799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128_022, 1_658, 123_311, 5_155, 5_578, 4_722, 279, 14_947, 2_366, 1_120, 1_197, 14, 1_348, 9_232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__ , model_name="facebook/m2m100_418M" , revision="c168bae485c864188cf9aa0e4108b0b6934dc91e" , )
@require_torch
@require_sentencepiece
@require_tokenizers
class A_ (unittest.TestCase ):
"""simple docstring"""
a__ = '''facebook/m2m100_418M'''
a__ = [
'''In my opinion, there are two levels of response from the French government.''',
'''NSA Affair Emphasizes Complete Lack of Debate on Intelligence''',
]
a__ = [
'''Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.''',
'''L\'affaire NSA souligne l\'absence totale de débat sur le renseignement''',
]
# fmt: off
a__ = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
@classmethod
def _A ( cls :str ) -> int:
'''simple docstring'''
        cls.tokenizer : MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang="en" , tgt_lang="fr" )
        cls.pad_token_id = 1
return cls
def _A ( self :Tuple ) -> Union[str, Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.get_lang_id("ar" ) , 128_006 )
self.assertEqual(self.tokenizer.get_lang_id("en" ) , 128_022 )
self.assertEqual(self.tokenizer.get_lang_id("ro" ) , 128_076 )
self.assertEqual(self.tokenizer.get_lang_id("mr" ) , 128_063 )
def _A ( self :Optional[int] ) -> List[str]:
'''simple docstring'''
snake_case_ : Dict = self.tokenizer.get_vocab()
self.assertEqual(len(lowerCAmelCase__ ) , self.tokenizer.vocab_size )
self.assertEqual(vocab["<unk>"] , 3 )
self.assertIn(self.tokenizer.get_lang_token("en" ) , lowerCAmelCase__ )
def _A ( self :Any ) -> Dict:
'''simple docstring'''
snake_case_ : List[str] = "en"
snake_case_ : Dict = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase__ )
def _A ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
        self.assertIn(FR_CODE , self.tokenizer.all_special_ids )
# fmt: off
snake_case_ : Dict = [FR_CODE, 5_364, 82, 8_642, 4, 294, 47, 8, 14_028, 136, 3_286, 9_706, 6, 90_797, 6, 144_012, 162, 88_128, 30_061, 5, 2]
# fmt: on
snake_case_ : List[str] = self.tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
snake_case_ : str = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertNotIn(self.tokenizer.eos_token , lowerCAmelCase__ )
def _A ( self :Tuple ) -> Tuple:
'''simple docstring'''
snake_case_ : Union[str, Any] = tempfile.mkdtemp()
snake_case_ : int = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(lowerCAmelCase__ )
snake_case_ : List[str] = MaMaaaTokenizer.from_pretrained(lowerCAmelCase__ )
self.assertDictEqual(new_tok.lang_token_to_id , lowerCAmelCase__ )
@require_torch
def _A ( self :Optional[Any] ) -> str:
'''simple docstring'''
snake_case_ : Union[str, Any] = "en"
snake_case_ : Tuple = "fr"
snake_case_ : Optional[int] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase__ , return_tensors="pt" )
snake_case_ : Dict = shift_tokens_right(
batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
for k in batch:
snake_case_ : str = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def _A ( self :Optional[Any] ) -> Tuple:
'''simple docstring'''
snake_case_ : List[str] = "mr"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
snake_case_ : int = "zh"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
def _A ( self :str ) -> int:
'''simple docstring'''
snake_case_ : Dict = "mr"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
snake_case_ : Tuple = "zh"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def _A ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Optional[int] = self.tokenizer._build_translation_inputs("A test" , return_tensors="pt" , src_lang="en" , tgt_lang="ar" )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
# en_XX, A, test, EOS
"input_ids": [[128_022, 58, 4_183, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 128_006,
} , )
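# Usage sketch for the tokenizer under test; checkpoint and language codes are taken from
# the test class above (M2M100Tokenizer is the canonical class name in transformers):
#
#   from transformers import M2M100Tokenizer
#   tok = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
#   batch = tok("Hello world", text_target="Bonjour le monde", return_tensors="pt")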
| 656 | 0 |
'''simple docstring'''
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
TOKENIZER_CLASSES = {name: getattr(transformers, name + '''Fast''') for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast ( tokenizer_name ,checkpoint_name ,dump_path ,force_download ):
    """simple docstring"""
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(F'''Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.''' )
    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers ,tokenizer_name + "Fast" )}
    logger.info(F'''Loading tokenizer classes: {tokenizer_names}''' )
    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys() )
        else:
            checkpoint_names = [checkpoint_name]
        logger.info(F'''For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}''' )
        for checkpoint in checkpoint_names:
            logger.info(F'''Loading {tokenizer_class.__class__.__name__} {checkpoint}''' )
            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint ,force_download=force_download )
            # Save fast tokenizer
            logger.info(F'''Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}''' )
            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/" )
                dump_path_full = os.path.join(dump_path ,checkpoint_directory )
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path
            logger.info(F'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''' )
            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
                next_char = file_path.split(checkpoint )[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full ,checkpoint_prefix_name )
                    checkpoint_prefix_name = None
                logger.info(F'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''' )
            file_names = tokenizer.save_pretrained(
                dump_path_full ,legacy_format=False ,filename_prefix=checkpoint_prefix_name )
            logger.info(F'''=> File names {file_names}''' )
            for file_name in file_names:
                if not file_name.endswith("tokenizer.json" ):
                    os.remove(file_name )
                    logger.info(F'''=> removing {file_name}''' )
if __name__ == "__main__":
__lowerCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output generated fast tokenizer files.'''
)
parser.add_argument(
'''--tokenizer_name''',
default=None,
type=str,
help=(
f'''Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will '''
'''download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--checkpoint_name''',
default=None,
type=str,
help='''Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.''',
)
parser.add_argument(
'''--force_download''',
action='''store_true''',
help='''Re-download checkpoints.''',
)
__lowerCamelCase : List[Any] = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
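# Example invocation (script name and checkpoint are illustrative, not taken from the repo):
#
#   python convert_slow_tokenizers_checkpoints_to_fast.py --dump_path ./fast_tokenizers \
#       --tokenizer_name BertTokenizer --checkpoint_name bert-base-uncased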
| 713 |
'''simple docstring'''
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = '''.'''
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
'''Assert''',
'''AssignVariableOp''',
'''EmptyTensorList''',
'''MergeV2Checkpoints''',
'''ReadVariableOp''',
'''ResourceGather''',
'''RestoreV2''',
'''SaveV2''',
'''ShardedFilename''',
'''StatefulPartitionedCall''',
'''StaticRegexFullMatch''',
'''VarHandleOp''',
]
def onnx_compliancy ( saved_model_path ,strict ,opset ):
    """simple docstring"""
    saved_model = SavedModel()
    onnx_ops = []
    with open(os.path.join(REPO_PATH ,"utils" ,"tf_ops" ,"onnx.json" ) ) as f:
        onnx_opsets = json.load(f )["opsets"]
    for i in range(1 ,opset + 1 ):
        onnx_ops.extend(onnx_opsets[str(i )] )
    with open(saved_model_path ,"rb" ) as f:
        saved_model.ParseFromString(f.read() )
    model_op_names = set()
    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node )
        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def )
    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names )
    incompatible_ops = []
    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op )
    if strict and len(incompatible_ops ) > 0:
        raise Exception(F'''Found the following incompatible ops for the opset {opset}:\n''' + "\n".join(incompatible_ops ) )
    elif len(incompatible_ops ) > 0:
        print(F'''Found the following incompatible ops for the opset {opset}:''' )
        print(*incompatible_ops ,sep="\n" )
    else:
        print(F'''The saved model {saved_model_path} can properly be converted with ONNX.''' )
if __name__ == "__main__":
__lowerCamelCase : Any = argparse.ArgumentParser()
parser.add_argument('''--saved_model_path''', help='''Path of the saved model to check (the .pb file).''')
parser.add_argument(
'''--opset''', default=12, type=int, help='''The ONNX opset against which the model has to be tested.'''
)
parser.add_argument(
'''--framework''', choices=['''onnx'''], default='''onnx''', help='''Frameworks against which to test the saved model.'''
)
parser.add_argument(
'''--strict''', action='''store_true''', help='''Whether make the checking strict (raise errors) or not (raise warnings)'''
)
__lowerCamelCase : Dict = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
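# Example invocation (the saved-model path is illustrative):
#
#   python check_tf_ops.py --saved_model_path ./model/saved_model.pb --opset 12 --strict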
| 656 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__lowerCamelCase : List[Any] = {
'''configuration_data2vec_audio''': ['''DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Data2VecAudioConfig'''],
'''configuration_data2vec_text''': [
'''DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Data2VecTextConfig''',
'''Data2VecTextOnnxConfig''',
],
'''configuration_data2vec_vision''': [
'''DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Data2VecVisionConfig''',
'''Data2VecVisionOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_data2vec_audio'''] = [
'''DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecAudioForAudioFrameClassification''',
'''Data2VecAudioForCTC''',
'''Data2VecAudioForSequenceClassification''',
'''Data2VecAudioForXVector''',
'''Data2VecAudioModel''',
'''Data2VecAudioPreTrainedModel''',
]
    _import_structure['''modeling_data2vec_text'''] = [
'''DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecTextForCausalLM''',
'''Data2VecTextForMaskedLM''',
'''Data2VecTextForMultipleChoice''',
'''Data2VecTextForQuestionAnswering''',
'''Data2VecTextForSequenceClassification''',
'''Data2VecTextForTokenClassification''',
'''Data2VecTextModel''',
'''Data2VecTextPreTrainedModel''',
]
    _import_structure['''modeling_data2vec_vision'''] = [
'''DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecVisionForImageClassification''',
'''Data2VecVisionForMaskedImageModeling''',
'''Data2VecVisionForSemanticSegmentation''',
'''Data2VecVisionModel''',
'''Data2VecVisionPreTrainedModel''',
]
if is_tf_available():
    _import_structure['''modeling_tf_data2vec_vision'''] = [
'''TFData2VecVisionForImageClassification''',
'''TFData2VecVisionForSemanticSegmentation''',
'''TFData2VecVisionModel''',
'''TFData2VecVisionPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig
from .configuration_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecTextConfig,
DataaVecTextOnnxConfig,
)
from .configuration_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecVisionConfig,
DataaVecVisionOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dataavec_audio import (
DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecAudioForAudioFrameClassification,
DataaVecAudioForCTC,
DataaVecAudioForSequenceClassification,
DataaVecAudioForXVector,
DataaVecAudioModel,
DataaVecAudioPreTrainedModel,
)
from .modeling_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecTextForCausalLM,
DataaVecTextForMaskedLM,
DataaVecTextForMultipleChoice,
DataaVecTextForQuestionAnswering,
DataaVecTextForSequenceClassification,
DataaVecTextForTokenClassification,
DataaVecTextModel,
DataaVecTextPreTrainedModel,
)
from .modeling_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecVisionForImageClassification,
DataaVecVisionForMaskedImageModeling,
DataaVecVisionForSemanticSegmentation,
DataaVecVisionModel,
DataaVecVisionPreTrainedModel,
)
if is_tf_available():
from .modeling_tf_dataavec_vision import (
TFDataaVecVisionForImageClassification,
TFDataaVecVisionForSemanticSegmentation,
TFDataaVecVisionModel,
TFDataaVecVisionPreTrainedModel,
)
else:
import sys
__lowerCamelCase : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
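# Usage note: _LazyModule defers the heavy framework imports, so in the canonical library
#
#   from transformers.models.data2vec import Data2VecTextConfig
#
# only touches the configuration module; the torch/TF modeling modules registered in
# _import_structure above are imported on first attribute access.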
| 714 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)
_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ['''names''', '''prefix''']
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols''']
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ['''encoding_errors''', '''on_bad_lines''']
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ['''date_format''']
@dataclass
class A_ (datasets.BuilderConfig ):
"""simple docstring"""
a__ = ","
a__ = None
a__ = "infer"
a__ = None
a__ = None
a__ = None
a__ = None
a__ = None
a__ = True
a__ = None
a__ = None
a__ = None
a__ = None
a__ = False
a__ = None
a__ = None
a__ = None
a__ = True
a__ = True
a__ = False
a__ = True
a__ = None
a__ = "."
a__ = None
a__ = '"'
a__ = 0
a__ = None
a__ = None
a__ = None
a__ = None
a__ = True
a__ = True
a__ = 0
a__ = True
a__ = False
a__ = None
a__ = 10000
a__ = None
a__ = "strict"
a__ = "error"
a__ = None
def _A ( self :List[str] ) -> Any:
'''simple docstring'''
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names
@property
def _A ( self :Optional[Any] ) -> int:
'''simple docstring'''
        pd_read_csv_kwargs = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , pd_read_csv_parameter ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class A_ (datasets.ArrowBasedBuilder ):
"""simple docstring"""
a__ = CsvConfig
def _A ( self :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def _A ( self :Tuple , lowerCAmelCase__ :Dict ) -> List[Any]:
'''simple docstring'''
if not self.config.data_files:
raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
snake_case_ : Optional[Any] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(lowerCAmelCase__ , (str, list, tuple) ):
snake_case_ : int = data_files
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
snake_case_ : List[str] = [files]
snake_case_ : Tuple = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
snake_case_ : str = []
for split_name, files in data_files.items():
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
snake_case_ : str = [files]
snake_case_ : Any = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files]
splits.append(datasets.SplitGenerator(name=lowerCAmelCase__ , gen_kwargs={"files": files} ) )
return splits
def _A ( self :List[Any] , lowerCAmelCase__ :pa.Table ) -> pa.Table:
'''simple docstring'''
if self.config.features is not None:
snake_case_ : int = self.config.features.arrow_schema
if all(not require_storage_cast(lowerCAmelCase__ ) for feature in self.config.features.values() ):
# cheaper cast
snake_case_ : Optional[Any] = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=lowerCAmelCase__ )
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
snake_case_ : Dict = table_cast(lowerCAmelCase__ , lowerCAmelCase__ )
return pa_table
def _A ( self :Dict , lowerCAmelCase__ :Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Tuple = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
snake_case_ : str = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(lowerCAmelCase__ ) else object
for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(lowerCAmelCase__ ) ):
snake_case_ : Tuple = pd.read_csv(lowerCAmelCase__ , iterator=lowerCAmelCase__ , dtype=lowerCAmelCase__ , **self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(lowerCAmelCase__ ):
snake_case_ : Optional[int] = pa.Table.from_pandas(lowerCAmelCase__ )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(lowerCAmelCase__ )
except ValueError as e:
logger.error(F'''Failed to read file \'{file}\' with error {type(lowerCAmelCase__ )}: {e}''' )
raise
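# Usage sketch: this builder backs the "csv" packaged loader in `datasets`, so any
# CsvConfig field above can be forwarded as a keyword argument (file name illustrative):
#
#   from datasets import load_dataset
#   ds = load_dataset("csv", data_files="data.csv", sep=";", skiprows=1)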
| 656 | 0 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class A_ (ProcessorMixin ):
"""simple docstring"""
a__ = ['''image_processor''', '''tokenizer''']
a__ = '''OwlViTImageProcessor'''
a__ = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
def __init__( self :str , lowerCAmelCase__ :Dict=None , lowerCAmelCase__ :Any=None , **lowerCAmelCase__ :List[Any] ) -> Any:
'''simple docstring'''
snake_case_ : Dict = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , _UpperCamelCase , )
snake_case_ : Optional[int] = kwargs.pop("feature_extractor" )
snake_case_ : List[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(_UpperCamelCase , _UpperCamelCase )
def __call__( self :int , lowerCAmelCase__ :Union[str, Any]=None , lowerCAmelCase__ :Optional[int]=None , lowerCAmelCase__ :Dict=None , lowerCAmelCase__ :Dict="max_length" , lowerCAmelCase__ :List[str]="np" , **lowerCAmelCase__ :Optional[Any] ) -> List[Any]:
'''simple docstring'''
if text is None and query_images is None and images is None:
raise ValueError(
"You have to specify at least one text or query image or image. All three cannot be none." )
if text is not None:
if isinstance(_UpperCamelCase , _UpperCamelCase ) or (isinstance(_UpperCamelCase , _UpperCamelCase ) and not isinstance(text[0] , _UpperCamelCase )):
snake_case_ : int = [self.tokenizer(_UpperCamelCase , padding=_UpperCamelCase , return_tensors=_UpperCamelCase , **_UpperCamelCase )]
elif isinstance(_UpperCamelCase , _UpperCamelCase ) and isinstance(text[0] , _UpperCamelCase ):
snake_case_ : Union[str, Any] = []
# Maximum number of queries across batch
snake_case_ : List[str] = max([len(_UpperCamelCase ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(_UpperCamelCase ) != max_num_queries:
snake_case_ : Any = t + [""" """] * (max_num_queries - len(_UpperCamelCase ))
snake_case_ : int = self.tokenizer(_UpperCamelCase , padding=_UpperCamelCase , return_tensors=_UpperCamelCase , **_UpperCamelCase )
encodings.append(_UpperCamelCase )
else:
raise TypeError("Input text should be a string, a list of strings or a nested list of strings" )
if return_tensors == "np":
snake_case_ : List[str] = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
snake_case_ : Union[str, Any] = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
snake_case_ : str = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
snake_case_ : Optional[Any] = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
snake_case_ : str = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 )
snake_case_ : List[Any] = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
snake_case_ : List[str] = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 )
snake_case_ : List[str] = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 )
else:
raise ValueError("Target return tensor type could not be returned" )
snake_case_ : Any = BatchEncoding()
snake_case_ : str = input_ids
snake_case_ : Union[str, Any] = attention_mask
if query_images is not None:
snake_case_ : Optional[Any] = BatchEncoding()
snake_case_ : int = self.image_processor(
_UpperCamelCase , return_tensors=_UpperCamelCase , **_UpperCamelCase ).pixel_values
snake_case_ : Tuple = query_pixel_values
if images is not None:
snake_case_ : Any = self.image_processor(_UpperCamelCase , return_tensors=_UpperCamelCase , **_UpperCamelCase )
if text is not None and images is not None:
snake_case_ : List[str] = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
snake_case_ : List[str] = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**_UpperCamelCase ) , tensor_type=_UpperCamelCase )
def _A ( self :Union[str, Any] , *lowerCAmelCase__ :int , **lowerCAmelCase__ :int ) -> Tuple:
'''simple docstring'''
return self.image_processor.post_process(*_UpperCamelCase , **_UpperCamelCase )
def _A ( self :Tuple , *lowerCAmelCase__ :Optional[int] , **lowerCAmelCase__ :List[Any] ) -> Union[str, Any]:
'''simple docstring'''
return self.image_processor.post_process_object_detection(*_UpperCamelCase , **_UpperCamelCase )
def _A ( self :int , *lowerCAmelCase__ :Dict , **lowerCAmelCase__ :str ) -> int:
'''simple docstring'''
return self.image_processor.post_process_image_guided_detection(*_UpperCamelCase , **_UpperCamelCase )
def _A ( self :Optional[int] , *lowerCAmelCase__ :Any , **lowerCAmelCase__ :str ) -> List[Any]:
'''simple docstring'''
return self.tokenizer.batch_decode(*_UpperCamelCase , **_UpperCamelCase )
def _A ( self :Any , *lowerCAmelCase__ :Tuple , **lowerCAmelCase__ :List[Any] ) -> Any:
'''simple docstring'''
return self.tokenizer.decode(*_UpperCamelCase , **_UpperCamelCase )
@property
def _A ( self :Optional[Any] ) -> List[Any]:
'''simple docstring'''
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , _UpperCamelCase , )
return self.image_processor_class
@property
def _A ( self :Dict ) -> List[Any]:
'''simple docstring'''
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , _UpperCamelCase , )
return self.image_processor
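# Usage sketch (the checkpoint is a published OWL-ViT model, assumed here for illustration;
# `image` stands for a PIL.Image loaded beforehand; OwlViTProcessor is the canonical class name):
#
#   from transformers import OwlViTProcessor
#   processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#   inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")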
| 715 |
'''simple docstring'''
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ (TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
a__ = MgpstrTokenizer
a__ = False
a__ = {}
a__ = False
def _A ( self :List[str] ) -> List[str]:
'''simple docstring'''
super().setUp()
# fmt: off
snake_case_ : Dict = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
# fmt: on
snake_case_ : List[str] = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
snake_case_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowerCAmelCase__ ) + "\n" )
def _A ( self :Optional[Any] , **lowerCAmelCase__ :Optional[Any] ) -> Dict:
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def _A ( self :Dict , lowerCAmelCase__ :Any ) -> str:
'''simple docstring'''
snake_case_ : Dict = "tester"
snake_case_ : Tuple = "tester"
return input_text, output_text
@unittest.skip("MGP-STR always lower cases letters." )
def _A ( self :Dict ) -> str:
'''simple docstring'''
pass
def _A ( self :Tuple ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : List[str] = self.get_tokenizers(do_lower_case=lowerCAmelCase__ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
snake_case_ : Tuple = "[SPECIAL_TOKEN]"
tokenizer.add_special_tokens({"cls_token": special_token} )
snake_case_ : str = tokenizer.encode([special_token] , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(len(lowerCAmelCase__ ) , 1 )
snake_case_ : Tuple = tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
self.assertTrue(special_token not in decoded )
def _A ( self :int ) -> List[str]:
'''simple docstring'''
snake_case_ : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
snake_case_, snake_case_ : str = self.get_input_output_texts(lowerCAmelCase__ )
snake_case_ : Union[str, Any] = tokenizer.tokenize(lowerCAmelCase__ )
snake_case_ : List[Any] = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ )
snake_case_ : Dict = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : List[str] = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ )
self.assertNotEqual(len(lowerCAmelCase__ ) , 0 )
snake_case_ : List[str] = tokenizer.decode(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(text_a.replace(" " , "" ) , lowerCAmelCase__ )
@unittest.skip("MGP-STR tokenizer only handles one sequence." )
def _A ( self :Union[str, Any] ) -> Any:
'''simple docstring'''
pass
@unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" )
def _A ( self :int ) -> Dict:
'''simple docstring'''
pass
| 656 | 0 |
'''simple docstring'''
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
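# Usage note: SquadV1Processor targets SQuAD 1.1 (every question is answerable) while
# SquadV2Processor targets SQuAD 2.0 (which adds unanswerable questions); both produce
# examples that squad_convert_examples_to_features turns into model inputs.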
| 716 |
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean ( input_a ,input_b )-> float:
    """simple docstring"""
    return math.sqrt(sum(pow(a - b ,2 ) for a, b in zip(input_a ,input_b ) ) )
def similarity_search ( dataset ,value_array )-> list[list[list[float] | float]]:
    """simple docstring"""
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            F'''dataset : {dataset.ndim}, value_array : {value_array.ndim}'''
        )
        raise ValueError(msg )
    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                F'''dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}'''
            )
            raise ValueError(msg )
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape" )
    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            F'''dataset : {dataset.dtype}, value_array : {value_array.dtype}'''
        )
        raise TypeError(msg )
    answer = []
    # Naive O(n*m) nearest-neighbour scan: for each query vector, keep the dataset
    # row with the smallest Euclidean distance seen so far.
    for value in value_array:
        dist = euclidean(value ,dataset[0] )
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value ,dataset_value )
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist] )
    return answer
def cosine_similarity ( input_a ,input_b )-> float:
    """simple docstring"""
    return np.dot(input_a ,input_b ) / (norm(input_a ) * norm(input_b ))
if __name__ == "__main__":
import doctest
doctest.testmod()
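# Usage sketch (shapes assumed: dataset is (n, d), value_array is (m, d)):
#
#   dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
#   queries = np.array([[0.9, 1.0]])
#   similarity_search(dataset, queries)        # -> [[[1.0, 1.0], ~0.1]]: one [vector, distance] per query
#   cosine_similarity(dataset[1], queries[0])  # scalar in [-1, 1]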
| 656 | 0 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)
__lowerCamelCase : int = {
'''Intel/dpt-large''': '''https://huggingface.co/Intel/dpt-large/resolve/main/config.json''',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class A_ (PretrainedConfig ):
"""simple docstring"""
a__ = '''dpt'''
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.0_2 , layer_norm_eps=1E-1_2 , image_size=384 , patch_size=16 , num_channels=3 , is_hybrid=False , qkv_bias=True , backbone_out_indices=[2, 5, 8, 11] , readout_type="project" , reassemble_factors=[4, 2, 1, 0.5] , neck_hidden_sizes=[96, 192, 384, 768] , fusion_hidden_size=256 , head_in_index=-1 , use_batch_norm_in_fusion_residual=False , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , semantic_loss_ignore_index=255 , semantic_classifier_dropout=0.1 , backbone_featmap_shape=[1, 1_024, 24, 24] , neck_ignore_stages=[0, 1] , backbone_config=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid
        if self.is_hybrid:
            if backbone_config is None:
                logger.info("Initializing the config with a `BiT` backbone." )
                backbone_config = {
                    "global_padding": "same",
                    "layer_type": "bottleneck",
                    "depths": [3, 4, 9],
                    "out_features": ["stage1", "stage2", "stage3"],
                    "embedding_dynamic_padding": True,
                }
                backbone_config = BitConfig(**backbone_config )
            elif isinstance(backbone_config , dict ):
                logger.info("Initializing the config with a `BiT` backbone." )
                backbone_config = BitConfig(**backbone_config )
            elif isinstance(backbone_config , PretrainedConfig ):
                backbone_config = backbone_config
            else:
                raise ValueError(
                    F'''backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.''' )
            self.backbone_config = backbone_config
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages
            if readout_type != "project":
                raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode." )
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']" )
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout
    def _A ( self ):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
return output
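# Hedged usage sketch (added): exercising the hybrid-backbone fallback coded
# above. `DPTConfig` is the upstream transformers name this class appears to
# mirror; treat the import and attribute names as assumptions.
if __name__ == "__main__":
    from transformers import DPTConfig

    example_config = DPTConfig(is_hybrid=True)  # no backbone_config -> BiT defaults
    print(type(example_config.backbone_config).__name__)  # expected: BitConfig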
| 717 |
'''simple docstring'''
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path( pred_path ,tgt_path ,save_path=None ,**rouge_kwargs ):
    """simple docstring"""
    pred_lns = [x.strip() for x in open(pred_path ).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path ).readlines()][: len(pred_lns )]
    metrics = calculate_rouge(pred_lns ,tgt_lns ,**rouge_kwargs )
    if save_path is not None:
        save_json(metrics ,save_path ,indent=None )
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
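# Example invocation (added; the script and file names are placeholders, not
# from the original). fire maps the two positional arguments to the prediction
# and reference files, one text line per example, and forwards extra flags as
# keyword arguments to calculate_rouge:
#
#   python calculate_rouge_path.py preds.txt refs.txt --save_path=rouge.json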
| 656 | 0 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__lowerCamelCase : str = logging.get_logger(__name__)
__lowerCamelCase : Optional[Any] = {
"speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json",
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class A_ (PretrainedConfig ):
"""simple docstring"""
a__ = '''mctct'''
def __init__( self :str , lowerCAmelCase__ :Dict=8_065 , lowerCAmelCase__ :Optional[Any]=1_536 , lowerCAmelCase__ :int=36 , lowerCAmelCase__ :List[str]=6_144 , lowerCAmelCase__ :Optional[int]=4 , lowerCAmelCase__ :Tuple=384 , lowerCAmelCase__ :str=920 , lowerCAmelCase__ :List[str]=1E-5 , lowerCAmelCase__ :Optional[int]=0.3 , lowerCAmelCase__ :str="relu" , lowerCAmelCase__ :Optional[Any]=0.0_2 , lowerCAmelCase__ :str=0.3 , lowerCAmelCase__ :Union[str, Any]=0.3 , lowerCAmelCase__ :Optional[Any]=1 , lowerCAmelCase__ :Union[str, Any]=0 , lowerCAmelCase__ :str=2 , lowerCAmelCase__ :Union[str, Any]=1 , lowerCAmelCase__ :Union[str, Any]=0.3 , lowerCAmelCase__ :List[Any]=1 , lowerCAmelCase__ :Optional[int]=(7,) , lowerCAmelCase__ :List[str]=(3,) , lowerCAmelCase__ :Dict=80 , lowerCAmelCase__ :Union[str, Any]=1 , lowerCAmelCase__ :Any=None , lowerCAmelCase__ :str="sum" , lowerCAmelCase__ :List[Any]=False , **lowerCAmelCase__ :Tuple , ) -> Any:
'''simple docstring'''
super().__init__(**lowerCamelCase_ , pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ )
snake_case_ : int = vocab_size
snake_case_ : Tuple = hidden_size
snake_case_ : List[Any] = num_hidden_layers
snake_case_ : List[Any] = intermediate_size
snake_case_ : Union[str, Any] = num_attention_heads
snake_case_ : Optional[Any] = attention_head_dim
snake_case_ : Optional[Any] = max_position_embeddings
snake_case_ : Tuple = layer_norm_eps
snake_case_ : str = layerdrop
snake_case_ : List[str] = hidden_act
snake_case_ : Optional[Any] = initializer_range
snake_case_ : List[str] = hidden_dropout_prob
snake_case_ : int = attention_probs_dropout_prob
snake_case_ : Tuple = pad_token_id
snake_case_ : Tuple = bos_token_id
snake_case_ : List[str] = eos_token_id
snake_case_ : int = conv_glu_dim
snake_case_ : str = conv_dropout
snake_case_ : Tuple = num_conv_layers
snake_case_ : int = input_feat_per_channel
snake_case_ : Optional[Any] = input_channels
snake_case_ : Any = conv_channels
snake_case_ : Tuple = ctc_loss_reduction
snake_case_ : Any = ctc_zero_infinity
# prevents config testing fail with exporting to json
snake_case_ : Dict = list(lowerCamelCase_ )
snake_case_ : Optional[int] = list(lowerCamelCase_ )
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
"Configuration for convolutional module is incorrect. "
"It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
F'''but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, '''
F'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
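# Hedged sketch (added): demonstrating the conv-kernel consistency check
# enforced above. Assumes this class mirrors transformers' MCTCTConfig and
# that `num_conv_layers`/`conv_kernel` keep their upstream names.
if __name__ == "__main__":
    from transformers import MCTCTConfig

    MCTCTConfig(num_conv_layers=1, conv_kernel=(7,))  # lengths match -> ok
    try:
        MCTCTConfig(num_conv_layers=2, conv_kernel=(7,))  # mismatch
    except ValueError as err:
        print(err)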
| 718 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
__lowerCamelCase : Optional[Any] = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', f'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', f'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', f'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', f'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.weight''', f'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', f'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', f'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', f'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.weight''', f'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', f'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', f'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', f'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', f'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', f'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.bias''', f'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', f'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', f'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', f'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.bias''', f'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', f'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''),
('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''),
('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''),
('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''),
('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''),
('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''),
('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''),
('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''),
('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''),
('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''),
]
)
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> int:
"""simple docstring"""
snake_case_ : Optional[Any] = state_dict.pop(__magic_name__ )
snake_case_ : Any = val
def __UpperCAmelCase ( __magic_name__ )-> Optional[Any]:
"""simple docstring"""
snake_case_ : Any = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
snake_case_ : Optional[Any] = key.replace("backbone.0.body" ,"backbone.conv_encoder.model" )
snake_case_ : int = value
else:
snake_case_ : int = value
return new_state_dict
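# Added illustration of the key remapping performed by the function above
# (the layer path below is a representative ResNet key, not taken from a
# real checkpoint).
def _example_rename_backbone_key():
    key = "backbone.0.body.layer1.0.conv1.weight"
    renamed = key.replace("backbone.0.body", "backbone.conv_encoder.model")
    assert renamed == "backbone.conv_encoder.model.layer1.0.conv1.weight"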
def __UpperCAmelCase ( __magic_name__ ,__magic_name__=False )-> Optional[int]:
"""simple docstring"""
snake_case_ : str = ""
if is_panoptic:
snake_case_ : Dict = "conditional_detr."
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
snake_case_ : Any = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
snake_case_ : Optional[int] = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
snake_case_ : Tuple = in_proj_weight[:256, :]
snake_case_ : List[Any] = in_proj_bias[:256]
snake_case_ : Optional[Any] = in_proj_weight[256:512, :]
snake_case_ : Optional[int] = in_proj_bias[256:512]
snake_case_ : Optional[int] = in_proj_weight[-256:, :]
snake_case_ : str = in_proj_bias[-256:]
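# Added illustration: PyTorch's MultiheadAttention packs q/k/v into a single
# stacked in_proj matrix of shape (3 * d_model, d_model); with d_model = 256
# the slices used above recover the three separate projections.
def _example_qkv_split_shapes():
    in_proj_weight = torch.randn(3 * 256, 256)
    q_w = in_proj_weight[:256, :]
    k_w = in_proj_weight[256:512, :]
    v_w = in_proj_weight[-256:, :]
    assert q_w.shape == k_w.shape == v_w.shape == (256, 256)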
def __UpperCAmelCase ( )-> Optional[Any]:
"""simple docstring"""
snake_case_ : Optional[int] = "http://images.cocodataset.org/val2017/000000039769.jpg"
snake_case_ : Optional[Any] = Image.open(requests.get(__magic_name__ ,stream=__magic_name__ ).raw )
return im
@torch.no_grad()
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> List[str]:
"""simple docstring"""
snake_case_ : Optional[Any] = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
snake_case_ : Optional[Any] = "resnet101"
if "dc5" in model_name:
snake_case_ : List[str] = True
snake_case_ : Tuple = "panoptic" in model_name
if is_panoptic:
snake_case_ : List[Any] = 250
else:
snake_case_ : Optional[Any] = 91
snake_case_ : Optional[int] = "huggingface/label-files"
snake_case_ : Dict = "coco-detection-id2label.json"
snake_case_ : List[Any] = json.load(open(hf_hub_download(__magic_name__ ,__magic_name__ ,repo_type="dataset" ) ,"r" ) )
    snake_case_ : Optional[int] = {int(k ): v for k, v in idalabel.items()}
snake_case_ : int = idalabel
snake_case_ : Dict = {v: k for k, v in idalabel.items()}
# load image processor
snake_case_ : Optional[int] = "coco_panoptic" if is_panoptic else "coco_detection"
snake_case_ : str = ConditionalDetrImageProcessor(format=__magic_name__ )
# prepare image
snake_case_ : str = prepare_img()
snake_case_ : int = image_processor(images=__magic_name__ ,return_tensors="pt" )
snake_case_ : Union[str, Any] = encoding["pixel_values"]
logger.info(F'''Converting model {model_name}...''' )
# load original model from torch hub
snake_case_ : Union[str, Any] = torch.hub.load("DeppMeng/ConditionalDETR" ,__magic_name__ ,pretrained=__magic_name__ ).eval()
snake_case_ : Any = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
snake_case_ : Any = "conditional_detr." + src
rename_key(__magic_name__ ,__magic_name__ ,__magic_name__ )
snake_case_ : Tuple = rename_backbone_keys(__magic_name__ )
# query, key and value matrices need special treatment
read_in_q_k_v(__magic_name__ ,is_panoptic=__magic_name__ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
snake_case_ : int = "conditional_detr.model." if is_panoptic else "model."
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("conditional_detr" )
and not key.startswith("class_labels_classifier" )
and not key.startswith("bbox_predictor" )
):
snake_case_ : Any = state_dict.pop(__magic_name__ )
snake_case_ : Optional[int] = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
snake_case_ : Tuple = state_dict.pop(__magic_name__ )
snake_case_ : Any = val
elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
continue
else:
snake_case_ : Union[str, Any] = state_dict.pop(__magic_name__ )
snake_case_ : List[Any] = val
else:
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
snake_case_ : Any = state_dict.pop(__magic_name__ )
snake_case_ : List[Any] = val
# finally, create HuggingFace model and load state dict
snake_case_ : Optional[int] = ConditionalDetrForSegmentation(__magic_name__ ) if is_panoptic else ConditionalDetrForObjectDetection(__magic_name__ )
model.load_state_dict(__magic_name__ )
model.eval()
model.push_to_hub(repo_id=__magic_name__ ,organization="DepuMeng" ,commit_message="Add model" )
# verify our conversion
snake_case_ : Dict = conditional_detr(__magic_name__ )
snake_case_ : Union[str, Any] = model(__magic_name__ )
assert torch.allclose(outputs.logits ,original_outputs["pred_logits"] ,atol=1E-4 )
assert torch.allclose(outputs.pred_boxes ,original_outputs["pred_boxes"] ,atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks ,original_outputs["pred_masks"] ,atol=1E-4 )
# Save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ )
model.save_pretrained(__magic_name__ )
image_processor.save_pretrained(__magic_name__ )
if __name__ == "__main__":
__lowerCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''conditional_detr_resnet50''',
type=str,
help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
__lowerCamelCase : int = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 656 | 0 |
def nor_gate( input_a ,input_b )-> int:
    """simple docstring"""
    return int(input_a == input_b == 0 )
def main( )-> None:
"""simple docstring"""
print("Truth Table of NOR Gate:" )
print("| Input 1 | Input 2 | Output |" )
print(F'''| 0 | 0 | {nor_gate(0 ,0 )} |''' )
print(F'''| 0 | 1 | {nor_gate(0 ,1 )} |''' )
print(F'''| 1 | 0 | {nor_gate(1 ,0 )} |''' )
print(F'''| 1 | 1 | {nor_gate(1 ,1 )} |''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 719 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class A_ (unittest.TestCase ):
"""simple docstring"""
def _A ( self :Any ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _A ( self :List[Any] ) -> List[str]:
'''simple docstring'''
snake_case_ : Any = 1
snake_case_ : Dict = 3
snake_case_ : Union[str, Any] = (32, 32)
snake_case_ : Optional[int] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowerCAmelCase__ )
return image
@property
def _A ( self :Optional[int] ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : List[str] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
return model
@property
def _A ( self :Dict ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : Optional[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def _A ( self :Dict ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : str = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_006 , )
return RobertaSeriesModelWithTransformation(lowerCAmelCase__ )
@property
def _A ( self :Any ) -> str:
'''simple docstring'''
def extract(*lowerCAmelCase__ :Any , **lowerCAmelCase__ :List[str] ):
class A_ :
"""simple docstring"""
def __init__( self :Optional[int] ) -> List[str]:
'''simple docstring'''
snake_case_ : str = torch.ones([0] )
def _A ( self :int , lowerCAmelCase__ :List[Any] ) -> Tuple:
'''simple docstring'''
self.pixel_values.to(lowerCAmelCase__ )
return self
return Out()
return extract
def _A ( self :int ) -> Dict:
'''simple docstring'''
snake_case_ : str = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case_ : str = self.dummy_cond_unet
snake_case_ : Optional[int] = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
snake_case_ : Dict = self.dummy_vae
snake_case_ : Dict = self.dummy_text_encoder
snake_case_ : Optional[int] = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
snake_case_ : str = 77
snake_case_ : Any = self.dummy_image.to(lowerCAmelCase__ )
snake_case_ : Tuple = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
snake_case_ : Optional[Any] = AltDiffusionImgaImgPipeline(
unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , feature_extractor=self.dummy_extractor , )
snake_case_ : Union[str, Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=lowerCAmelCase__ )
snake_case_ : Optional[Any] = alt_pipe.to(lowerCAmelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
snake_case_ : Dict = "A painting of a squirrel eating a burger"
snake_case_ : List[str] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
snake_case_ : Dict = alt_pipe(
[prompt] , generator=lowerCAmelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=lowerCAmelCase__ , )
snake_case_ : Any = output.images
snake_case_ : List[str] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
snake_case_ : Optional[Any] = alt_pipe(
[prompt] , generator=lowerCAmelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=lowerCAmelCase__ , return_dict=lowerCAmelCase__ , )[0]
snake_case_ : Tuple = image[0, -3:, -3:, -1]
snake_case_ : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
snake_case_ : int = np.array([0.4_4_2_7, 0.3_7_3_1, 0.4_2_4_9, 0.4_9_4_1, 0.4_5_4_6, 0.4_1_4_8, 0.4_1_9_3, 0.4_6_6_6, 0.4_4_9_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def _A ( self :int ) -> List[str]:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.dummy_cond_unet
snake_case_ : Union[str, Any] = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
snake_case_ : int = self.dummy_vae
snake_case_ : List[Any] = self.dummy_text_encoder
snake_case_ : int = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
snake_case_ : int = 77
snake_case_ : Dict = self.dummy_image.to(lowerCAmelCase__ )
# put models in fp16
snake_case_ : Optional[Any] = unet.half()
snake_case_ : Tuple = vae.half()
snake_case_ : List[str] = bert.half()
# make sure here that pndm scheduler skips prk
snake_case_ : Optional[int] = AltDiffusionImgaImgPipeline(
unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , feature_extractor=self.dummy_extractor , )
snake_case_ : List[str] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=lowerCAmelCase__ )
snake_case_ : Optional[Any] = alt_pipe.to(lowerCAmelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
snake_case_ : List[Any] = "A painting of a squirrel eating a burger"
snake_case_ : str = torch.manual_seed(0 )
snake_case_ : Any = alt_pipe(
[prompt] , generator=lowerCAmelCase__ , num_inference_steps=2 , output_type="np" , image=lowerCAmelCase__ , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def _A ( self :Optional[int] ) -> Any:
'''simple docstring'''
snake_case_ : Union[str, Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
# resize to resolution that is divisible by 8 but not 16 or 32
snake_case_ : str = init_image.resize((760, 504) )
snake_case_ : Optional[Any] = "BAAI/AltDiffusion"
snake_case_ : int = AltDiffusionImgaImgPipeline.from_pretrained(
lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
snake_case_ : Tuple = "A fantasy landscape, trending on artstation"
snake_case_ : int = torch.manual_seed(0 )
snake_case_ : List[str] = pipe(
prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , strength=0.7_5 , guidance_scale=7.5 , generator=lowerCAmelCase__ , output_type="np" , )
snake_case_ : str = output.images[0]
snake_case_ : List[Any] = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
snake_case_ : Tuple = np.array([0.9_3_5_8, 0.9_3_9_7, 0.9_5_9_9, 0.9_9_0_1, 1.0_0_0_0, 1.0_0_0_0, 0.9_8_8_2, 1.0_0_0_0, 1.0_0_0_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class A_ (unittest.TestCase ):
"""simple docstring"""
def _A ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _A ( self :str ) -> Any:
'''simple docstring'''
snake_case_ : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
snake_case_ : List[Any] = init_image.resize((768, 512) )
snake_case_ : Tuple = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" )
snake_case_ : Any = "BAAI/AltDiffusion"
snake_case_ : List[str] = AltDiffusionImgaImgPipeline.from_pretrained(
lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
snake_case_ : Tuple = "A fantasy landscape, trending on artstation"
snake_case_ : Tuple = torch.manual_seed(0 )
snake_case_ : List[Any] = pipe(
prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , strength=0.7_5 , guidance_scale=7.5 , generator=lowerCAmelCase__ , output_type="np" , )
snake_case_ : Optional[int] = output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1E-2
| 656 | 0 |
'''simple docstring'''
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
__lowerCamelCase : Dict = get_logger(__name__)
__lowerCamelCase : Any = Path(__file__).parent / """model_card_template.md"""
__lowerCamelCase : int = uuida().hex
__lowerCamelCase : List[str] = os.getenv('''HF_HUB_OFFLINE''', '''''').upper() in ENV_VARS_TRUE_VALUES
__lowerCamelCase : List[Any] = os.getenv('''DISABLE_TELEMETRY''', '''''').upper() in ENV_VARS_TRUE_VALUES
__lowerCamelCase : Tuple = HUGGINGFACE_CO_RESOLVE_ENDPOINT + """/api/telemetry/"""
def __UpperCAmelCase ( __magic_name__ = None )-> str:
"""simple docstring"""
snake_case_ : List[str] = F'''diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}'''
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += F'''; torch/{_torch_version}'''
if is_flax_available():
ua += F'''; jax/{_jax_version}'''
ua += F'''; flax/{_flax_version}'''
if is_onnx_available():
ua += F'''; onnxruntime/{_onnxruntime_version}'''
# CI will set this value to True
if os.environ.get("DIFFUSERS_IS_CI" ,"" ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
if isinstance(__magic_name__ ,__magic_name__ ):
ua += "; " + "; ".join(F'''{k}/{v}''' for k, v in user_agent.items() )
elif isinstance(__magic_name__ ,__magic_name__ ):
ua += "; " + user_agent
return ua
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ = None ,__magic_name__ = None )-> Union[str, Any]:
"""simple docstring"""
if token is None:
snake_case_ : Optional[Any] = HfFolder.get_token()
if organization is None:
snake_case_ : str = whoami(__magic_name__ )["name"]
return F'''{username}/{model_id}'''
else:
return F'''{organization}/{model_id}'''
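# Added illustration: with an explicit organization the helper above is pure
# string formatting; without one it resolves the username behind the stored
# HF token. The names below are placeholders.
def _example_full_repo_name():
    model_id, organization = "my-model", "my-org"
    assert f"{organization}/{model_id}" == "my-org/my-model"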
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> Any:
"""simple docstring"""
if not is_jinja_available():
raise ValueError(
"Modelcard rendering is based on Jinja templates."
" Please make sure to have `jinja` installed before using `create_model_card`."
" To install it, please run `pip install Jinja2`." )
if hasattr(__magic_name__ ,"local_rank" ) and args.local_rank not in [-1, 0]:
return
snake_case_ : int = args.hub_token if hasattr(__magic_name__ ,"hub_token" ) else None
snake_case_ : Union[str, Any] = get_full_repo_name(__magic_name__ ,token=__magic_name__ )
    snake_case_ : int = ModelCard.from_template(
        card_data=ModelCardData( # Card metadata object that will be converted to YAML block
            language="en" ,
            license="apache-2.0" ,
            library_name="diffusers" ,
            tags=[] ,
            datasets=args.dataset_name ,
            metrics=[] ,
        ) ,
        template_path=__magic_name__ ,
        model_name=__magic_name__ ,
        repo_name=__magic_name__ ,
        dataset_name=args.dataset_name if hasattr(__magic_name__ ,"dataset_name" ) else None ,
        learning_rate=args.learning_rate ,
        train_batch_size=args.train_batch_size ,
        eval_batch_size=args.eval_batch_size ,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(__magic_name__ ,"gradient_accumulation_steps" ) else None
        ) ,
        adam_beta1=args.adam_beta1 if hasattr(__magic_name__ ,"adam_beta1" ) else None ,
        adam_beta2=args.adam_beta2 if hasattr(__magic_name__ ,"adam_beta2" ) else None ,
        adam_weight_decay=args.adam_weight_decay if hasattr(__magic_name__ ,"adam_weight_decay" ) else None ,
        adam_epsilon=args.adam_epsilon if hasattr(__magic_name__ ,"adam_epsilon" ) else None ,
        lr_scheduler=args.lr_scheduler if hasattr(__magic_name__ ,"lr_scheduler" ) else None ,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(__magic_name__ ,"lr_warmup_steps" ) else None ,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(__magic_name__ ,"ema_inv_gamma" ) else None ,
        ema_power=args.ema_power if hasattr(__magic_name__ ,"ema_power" ) else None ,
        ema_max_decay=args.ema_max_decay if hasattr(__magic_name__ ,"ema_max_decay" ) else None ,
        mixed_precision=args.mixed_precision ,
    )
snake_case_ : Dict = os.path.join(args.output_dir ,"README.md" )
model_card.save(__magic_name__ )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ = None )-> str:
"""simple docstring"""
if resolved_file is None or commit_hash is not None:
return commit_hash
snake_case_ : List[str] = str(Path(__magic_name__ ).as_posix() )
snake_case_ : List[Any] = re.search(r"snapshots/([^/]+)/" ,__magic_name__ )
if search is None:
return None
snake_case_ : Optional[Any] = search.groups()[0]
return commit_hash if REGEX_COMMIT_HASH.match(__magic_name__ ) else None
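# Added illustration: the regex above extracts the commit hash from a hub
# cache path of the form .../snapshots/<40-hex-commit>/<file>. The path below
# is a synthetic placeholder.
def _example_extract_commit_hash():
    resolved = "hub/models--org--repo/snapshots/" + "a" * 40 + "/config.json"
    match = re.search(r"snapshots/([^/]+)/", resolved)
    assert match is not None and match.groups()[0] == "a" * 40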
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
__lowerCamelCase : List[str] = os.path.expanduser(
os.getenv('''HF_HOME''', os.path.join(os.getenv('''XDG_CACHE_HOME''', '''~/.cache'''), '''huggingface'''))
)
__lowerCamelCase : Optional[Any] = os.path.join(hf_cache_home, '''diffusers''')
def __UpperCAmelCase ( __magic_name__ = None ,__magic_name__ = None )-> None:
"""simple docstring"""
if new_cache_dir is None:
snake_case_ : Tuple = DIFFUSERS_CACHE
if old_cache_dir is None:
snake_case_ : Dict = old_diffusers_cache
snake_case_ : str = Path(__magic_name__ ).expanduser()
snake_case_ : int = Path(__magic_name__ ).expanduser()
for old_blob_path in old_cache_dir.glob("**/blobs/*" ):
if old_blob_path.is_file() and not old_blob_path.is_symlink():
snake_case_ : Optional[int] = new_cache_dir / old_blob_path.relative_to(__magic_name__ )
new_blob_path.parent.mkdir(parents=__magic_name__ ,exist_ok=__magic_name__ )
os.replace(__magic_name__ ,__magic_name__ )
try:
os.symlink(__magic_name__ ,__magic_name__ )
except OSError:
logger.warning(
"Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded." )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
__lowerCamelCase : str = os.path.join(DIFFUSERS_CACHE, '''version_diffusers_cache.txt''')
if not os.path.isfile(cache_version_file):
__lowerCamelCase : Any = 0
else:
with open(cache_version_file) as f:
try:
__lowerCamelCase : Optional[int] = int(f.read())
except ValueError:
__lowerCamelCase : Optional[Any] = 0
if cache_version < 1:
__lowerCamelCase : Union[str, Any] = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'''The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '''
'''existing cached models. This is a one-time operation, you can interrupt it or run it '''
'''later by calling `diffusers.utils.hub_utils.move_cache()`.'''
)
try:
move_cache()
except Exception as e:
__lowerCamelCase : Optional[Any] = """\n""".join(traceback.format_tb(e.__traceback__))
logger.error(
f'''There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease '''
'''file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '''
'''message and we will do our best to help.'''
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, '''w''') as f:
f.write('''1''')
except Exception:
logger.warning(
f'''There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure '''
'''the directory exists and can be written to.'''
)
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ = None )-> str:
"""simple docstring"""
if variant is not None:
snake_case_ : Optional[Any] = weights_name.split("." )
snake_case_ : Tuple = splits[:-1] + [variant] + splits[-1:]
snake_case_ : Any = ".".join(__magic_name__ )
return weights_name
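# Added illustration: the variant tag is spliced in just before the file
# extension, e.g. for fp16 weights.
def _example_add_variant():
    weights_name, variant = "diffusion_pytorch_model.bin", "fp16"
    splits = weights_name.split(".")
    spliced = ".".join(splits[:-1] + [variant] + splits[-1:])
    assert spliced == "diffusion_pytorch_model.fp16.bin"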
def __UpperCAmelCase ( __magic_name__ ,*,
__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__=None ,)-> Optional[Any]:
"""simple docstring"""
snake_case_ : List[str] = str(__magic_name__ )
if os.path.isfile(__magic_name__ ):
return pretrained_model_name_or_path
elif os.path.isdir(__magic_name__ ):
if os.path.isfile(os.path.join(__magic_name__ ,__magic_name__ ) ):
# Load from a PyTorch checkpoint
snake_case_ : List[str] = os.path.join(__magic_name__ ,__magic_name__ )
return model_file
elif subfolder is not None and os.path.isfile(
os.path.join(__magic_name__ ,__magic_name__ ,__magic_name__ ) ):
snake_case_ : Union[str, Any] = os.path.join(__magic_name__ ,__magic_name__ ,__magic_name__ )
return model_file
else:
raise EnvironmentError(
F'''Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.''' )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
and version.parse(version.parse(__magic_name__ ).base_version ) >= version.parse("0.20.0" )
):
try:
snake_case_ : Any = hf_hub_download(
__magic_name__ ,filename=_add_variant(__magic_name__ ,__magic_name__ ) ,cache_dir=__magic_name__ ,force_download=__magic_name__ ,proxies=__magic_name__ ,resume_download=__magic_name__ ,local_files_only=__magic_name__ ,use_auth_token=__magic_name__ ,user_agent=__magic_name__ ,subfolder=__magic_name__ ,revision=revision or commit_hash ,)
warnings.warn(
F'''Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.''' ,__magic_name__ ,)
return model_file
except: # noqa: E722
warnings.warn(
F'''You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(__magic_name__ ,__magic_name__ )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(__magic_name__ ,__magic_name__ )}\' so that the correct variant file can be added.''' ,__magic_name__ ,)
try:
# 2. Load model file as usual
snake_case_ : Tuple = hf_hub_download(
__magic_name__ ,filename=__magic_name__ ,cache_dir=__magic_name__ ,force_download=__magic_name__ ,proxies=__magic_name__ ,resume_download=__magic_name__ ,local_files_only=__magic_name__ ,use_auth_token=__magic_name__ ,user_agent=__magic_name__ ,subfolder=__magic_name__ ,revision=revision or commit_hash ,)
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
F'''{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier '''
"listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a "
"token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
"login`." )
except RevisionNotFoundError:
raise EnvironmentError(
F'''{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for '''
"this model name. Check the model page at "
F'''\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.''' )
except EntryNotFoundError:
raise EnvironmentError(
F'''{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.''' )
except HTTPError as err:
raise EnvironmentError(
F'''There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}''' )
except ValueError:
raise EnvironmentError(
F'''We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it'''
F''' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a'''
F''' directory containing a file named {weights_name} or'''
" \nCheckout your internet connection or see how to run the library in"
" offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'." )
except EnvironmentError:
raise EnvironmentError(
F'''Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from '''
"\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. "
F'''Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory '''
F'''containing a file named {weights_name}''' )
| 720 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
__lowerCamelCase : List[str] = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''}
@is_pipeline_test
class A_ (unittest.TestCase ):
"""simple docstring"""
a__ = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
a__ = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
a__ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
a__ = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def _A ( self :Tuple , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Dict ) -> Any:
'''simple docstring'''
snake_case_ : Optional[Any] = ZeroShotClassificationPipeline(
            model=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , candidate_labels=["politics", "health"] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def _A ( self :List[str] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Any ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Tuple = classifier("Who are you voting for in 2020?" , candidate_labels="politics" )
self.assertEqual(lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ )]} )
# No kwarg
snake_case_ : List[Any] = classifier("Who are you voting for in 2020?" , ["politics"] )
self.assertEqual(lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ )]} )
snake_case_ : Dict = classifier("Who are you voting for in 2020?" , candidate_labels=["politics"] )
self.assertEqual(lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ )]} )
snake_case_ : int = classifier("Who are you voting for in 2020?" , candidate_labels="politics, public health" )
self.assertEqual(
lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ) , 1.0 )
snake_case_ : Optional[int] = classifier("Who are you voting for in 2020?" , candidate_labels=["politics", "public health"] )
self.assertEqual(
lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ) , 1.0 )
snake_case_ : str = classifier(
"Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template="This text is about {}" )
self.assertEqual(lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ )]} )
# https://github.com/huggingface/transformers/issues/13846
snake_case_ : Dict = classifier(["I am happy"] , ["positive", "negative"] )
self.assertEqual(
lowerCAmelCase__ , [
{"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )]}
for i in range(1 )
] , )
snake_case_ : Tuple = classifier(["I am happy", "I am sad"] , ["positive", "negative"] )
self.assertEqual(
lowerCAmelCase__ , [
{"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )]}
for i in range(2 )
] , )
with self.assertRaises(lowerCAmelCase__ ):
classifier("" , candidate_labels="politics" )
with self.assertRaises(lowerCAmelCase__ ):
classifier(lowerCAmelCase__ , candidate_labels="politics" )
with self.assertRaises(lowerCAmelCase__ ):
classifier("Who are you voting for in 2020?" , candidate_labels="" )
with self.assertRaises(lowerCAmelCase__ ):
classifier("Who are you voting for in 2020?" , candidate_labels=lowerCAmelCase__ )
with self.assertRaises(lowerCAmelCase__ ):
classifier(
"Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template="Not formatting template" , )
with self.assertRaises(lowerCAmelCase__ ):
classifier(
"Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template=lowerCAmelCase__ , )
self.run_entailment_id(lowerCAmelCase__ )
def _A ( self :List[Any] , lowerCAmelCase__ :Pipeline ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : int = zero_shot_classifier.model.config
snake_case_ : Optional[int] = config.labelaid
snake_case_ : Tuple = zero_shot_classifier.entailment_id
snake_case_ : Optional[Any] = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
self.assertEqual(zero_shot_classifier.entailment_id , -1 )
snake_case_ : Tuple = {"entailment": 0, "neutral": 1, "contradiction": 2}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
snake_case_ : str = {"ENTAIL": 0, "NON-ENTAIL": 1}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
snake_case_ : str = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
self.assertEqual(zero_shot_classifier.entailment_id , 2 )
snake_case_ : List[str] = original_labelaid
self.assertEqual(lowerCAmelCase__ , zero_shot_classifier.entailment_id )
@require_torch
def _A ( self :Tuple ) -> Any:
'''simple docstring'''
snake_case_ : List[Any] = pipeline(
"zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
"Who are you voting for in 2020?" * 100 , candidate_labels=["politics", "public health", "science"] )
@require_torch
def _A ( self :Optional[Any] ) -> Tuple:
'''simple docstring'''
snake_case_ : Union[str, Any] = pipeline(
"zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , )
snake_case_ : int = zero_shot_classifier(
"Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.3_3_3, 0.3_3_3, 0.3_3_3],
} , )
@require_tf
def _A ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
snake_case_ : List[str] = pipeline(
"zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="tf" , )
snake_case_ : Optional[int] = zero_shot_classifier(
"Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.3_3_3, 0.3_3_3, 0.3_3_3],
} , )
@slow
@require_torch
def _A ( self :Union[str, Any] ) -> int:
'''simple docstring'''
snake_case_ : int = pipeline("zero-shot-classification" , model="roberta-large-mnli" , framework="pt" )
snake_case_ : str = zero_shot_classifier(
"Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.9_7_6, 0.0_1_5, 0.0_0_9],
} , )
snake_case_ : Optional[int] = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=lowerCAmelCase__ , )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
@slow
@require_tf
def _A ( self :List[str] ) -> str:
'''simple docstring'''
snake_case_ : int = pipeline("zero-shot-classification" , model="roberta-large-mnli" , framework="tf" )
snake_case_ : Optional[Any] = zero_shot_classifier(
"Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.9_7_6, 0.0_1_5, 0.0_0_9],
} , )
snake_case_ : Tuple = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=lowerCAmelCase__ , )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
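# Hedged quick-reference (added): outside the test harness, the pipeline
# exercised above is built and called like this. The checkpoint name is the
# tiny test model already referenced in the tests above.
def _example_zero_shot_usage():
    clf = pipeline(
        "zero-shot-classification",
        model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
    )
    return clf(
        "Who are you voting for in 2020?",
        candidate_labels=["politics", "public health", "science"],
    )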
| 656 | 0 |
'''simple docstring'''
from random import shuffle
import tensorflow as tf
from numpy import array
def TFKMeansCluster(vectors, noofclusters):
    """
    K-Means clustering with TensorFlow (TF1-style graph API).
    `vectors` is an n*k 2-D NumPy array (n vectors of dimensionality k) and
    `noofclusters` an integer. Returns the final centroids and per-vector
    cluster assignments.
    """
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)
    # Find out the dimensionality
    dim = len(vectors[0])
    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)
    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()
    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()
        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))
        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))
        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float", [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)
        ##Node for computing Euclidean distances
        # Placeholders for input
        va = tf.placeholder("float", [dim])
        vb = tf.placeholder("float", [dim])
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(va, vb), 2)))
        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)
        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.global_variables_initializer()
        # Initialize all variables
        sess.run(init_op)
        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={va: vect, vb: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment, feed_dict={centroid_distances: distances})
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n], feed_dict={assignment_value: assignment})
            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors))
                    if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op, feed_dict={mean_input: array(assigned_vects)})
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n], feed_dict={centroid_value: new_location})
        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
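
# Minimal usage sketch (an illustrative addition, not part of the original
# module). The function relies on the TF1-style graph API
# (tf.placeholder / tf.Session); on TensorFlow 2.x you would need
# `import tensorflow.compat.v1 as tf; tf.disable_v2_behavior()`.
if __name__ == "__main__":
    from numpy.random import rand

    sample_vectors = rand(20, 2)  # 20 random points in 2 dimensions
    centroids, assignments = TFKMeansCluster(sample_vectors, noofclusters=3)
    print("centroids:", centroids)
    print("assignments:", assignments)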
| 721 |
'''simple docstring'''
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''1.0.0a'''):
raise Exception('''requires fairseq >= 1.0.0a''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = '''Hello world! cécé herlolip'''
def convert_xlm_roberta_xl_checkpoint_to_pytorch(roberta_checkpoint_path, pytorch_dump_folder_path, classification_head):
    """
    Copy/paste/tweak the fairseq RoBERTa weights into our XLM-RoBERTa-XL structure.
    """
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=roberta.cfg.model.encoder_embed_dim,
        num_hidden_layers=roberta.cfg.model.encoder_layers,
        num_attention_heads=roberta.cfg.model.encoder_attention_heads,
        intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1E-5,
    )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our RoBERTa config:", config)
    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()
    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight )  # just zero them out b/c RoBERTa doesn't use them.
    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias
    for i in range(config.num_hidden_layers ):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias
        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size) )
        )
        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias
        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias
        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias
        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias
        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer
    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias
    # Let's check that we get the same results.
    input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT ).unsqueeze(0 )  # batch of size 1
    our_output = model(input_ids )[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids ) )
    else:
        their_output = roberta.model(input_ids )[0]
    print(our_output.shape , their_output.shape )
    max_absolute_diff = torch.max(torch.abs(our_output - their_output ) ).item()
    print(F'''max_absolute_diff = {max_absolute_diff}''' )  # ~ 1e-7
    success = torch.allclose(our_output , their_output , atol=1E-3 )
    print("Do both models output the same tensors?" , "🔥" if success else "💩" )
    if not success:
        raise Exception("Something went wRoNg" )
    pathlib.Path(pytorch_dump_folder_path ).mkdir(parents=True , exist_ok=True )
    print(F'''Saving model to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--roberta_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    parser.add_argument(
        '''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
    )
    args = parser.parse_args()
    convert_xlm_roberta_xl_checkpoint_to_pytorch(
        args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
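
# Example invocation (illustrative; the checkpoint directory is a placeholder
# you must point at a real fairseq XLM-R XL dump):
#
#   python convert_xlm_roberta_xl_original_pytorch_checkpoint_to_pytorch.py \
#       --roberta_checkpoint_path ./xlmr.xl \
#       --pytorch_dump_folder_path ./xlm-roberta-xl-converted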
| 656 | 0 |
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class A_ (unittest.TestCase ):
"""simple docstring"""
    def setUp( self :List[Any] ) -> Optional[int]:
        '''simple docstring'''
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] )
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep )[:-1] + ["scripts", "test_distributed_data_loop.py"] )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_ops.py"] )
@require_multi_gpu
def _A ( self :Optional[int] ) -> Optional[Any]:
'''simple docstring'''
print(F'''Found {torch.cuda.device_count()} devices.''' )
snake_case_ : Union[str, Any] = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(A_ , env=os.environ.copy() )
@require_multi_gpu
def _A ( self :Optional[int] ) -> Tuple:
'''simple docstring'''
print(F'''Found {torch.cuda.device_count()} devices.''' )
snake_case_ : Dict = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', self.operation_file_path]
print(F'''Command: {cmd}''' )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(A_ , env=os.environ.copy() )
@require_multi_gpu
def _A ( self :Tuple ) -> Dict:
'''simple docstring'''
snake_case_ : str = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(A_ , env=os.environ.copy() )
@require_multi_gpu
def _A ( self :str ) -> str:
'''simple docstring'''
print(F'''Found {torch.cuda.device_count()} devices, using 2 devices only''' )
snake_case_ : Union[str, Any] = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices="0,1" ):
execute_subprocess_async(A_ , env=os.environ.copy() )
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)
    error_msg = ""
    tensora = accelerator.pad_across_processes(tensor)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."
    tensora = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensora[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."
    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
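
# What the checks above assert, in a single-process sketch (illustrative,
# plain torch; `max_rows` stands in for the largest dim-0 size across ranks):
#
#   local = torch.ones(2, 10)
#   max_rows = 4
#   padded = torch.cat([local, torch.zeros(max_rows - local.shape[0], 10)])  # pad_first=False
#   assert padded.shape == (4, 10) and torch.all(padded[2:] == 0)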
| 700 |
'''simple docstring'''
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher(function ,args=() ,num_processes=None ,mixed_precision="no" ,use_port="29500" ):
    """simple docstring"""
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE" ) for key in os.environ.keys() ):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython() )
    try:
        mixed_precision = PrecisionType(mixed_precision.lower() )
    except ValueError:
        raise ValueError(
            F'''Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}.''' )
if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME" ,__magic_name__ ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
"your training function. Restart your notebook and make sure no cells initializes an "
"`Accelerator`." )
        if num_processes is None:
            num_processes = 8
        launcher = PrepareForLaunch(function ,distributed_type="TPU" )
        print(F'''Launching a training on {num_processes} TPU cores.''' )
        xmp.spawn(launcher ,args=args ,nprocs=num_processes ,start_method="fork" )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print("Launching training on one GPU." )
else:
print("Launching training on one CPU." )
        function(*args )
else:
if num_processes is None:
raise ValueError(
"You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call." )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
"inside your training function. Restart your notebook and make sure no cells initializes an "
"`Accelerator`." )
if torch.cuda.is_initialized():
raise ValueError(
"To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
"using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
"function." )
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
            with patch_environment(
                world_size=num_processes ,master_addr="127.0.0.1" ,master_port=use_port ,mixed_precision=mixed_precision ):
                launcher = PrepareForLaunch(function ,distributed_type="MULTI_GPU" )
                print(F'''Launching training on {num_processes} GPUs.''' )
                try:
                    start_processes(launcher ,args=args ,nprocs=num_processes ,start_method="fork" )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
"CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
"This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
"Please review your imports and test them when running the `notebook_launcher()` to identify "
"which one is problematic." ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
        if is_mps_available():
            os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
            print("Launching training on MPS." )
elif torch.cuda.is_available():
print("Launching training on one GPU." )
else:
print("Launching training on CPU." )
        function(*args )
def debug_launcher(function ,args=() ,num_processes=2 ):
    """simple docstring"""
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
        with patch_environment(
            world_size=num_processes ,master_addr="127.0.0.1" ,master_port="29500" ,accelerate_mixed_precision="no" ,accelerate_debug_rdv_file=tmp_file.name ,accelerate_use_cpu="yes" ,):
            launcher = PrepareForLaunch(function ,debug=True )
            start_processes(launcher ,args=args ,nprocs=num_processes ,start_method="fork" )
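
# Usage sketch (illustrative; `training_function` is a hypothetical user
# function, and multi-GPU spawning requires that CUDA has not yet been
# initialized in the notebook):
#
#   def training_function(lr):
#       ...  # build model/dataloaders, typically wrapping them with Accelerator()
#
#   notebook_launcher(training_function, args=(1e-4,), num_processes=2, mixed_precision="fp16")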
| 656 | 0 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskaFormerModelTester :
"""simple docstring"""
    def __init__( self :Tuple , parent :Union[str, Any] , batch_size :Any=2 , is_training :Union[str, Any]=True , use_auxiliary_loss :Dict=False , num_queries :int=10 , num_channels :Dict=3 , min_size :Any=32 * 8 , max_size :Tuple=32 * 8 , num_labels :Optional[int]=4 , hidden_dim :List[Any]=64 , ) -> str:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim
    def prepare_config_and_inputs( self :List[str] ) -> List[str]:
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
            torch_device )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size] , device=torch_device )
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=torch_device ) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels) , device=torch_device ) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels
    def get_config( self :Dict ) -> List[Any]:
        '''simple docstring'''
        config = MaskaFormerConfig(
            hidden_size=self.hidden_dim , )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels
        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config
    def prepare_config_and_inputs_for_common( self :List[Any] ) -> List[Any]:
        '''simple docstring'''
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict
    def check_output_hidden_state( self :List[str] , output :int , config :str ) -> List[Any]:
        '''simple docstring'''
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(encoder_hidden_states ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(pixel_decoder_hidden_states ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(transformer_decoder_hidden_states ) , config.decoder_layers )
    def create_and_check_maskaformer_model( self :Dict , config :Optional[Any] , pixel_values :List[Any] , pixel_mask :Union[str, Any] , output_hidden_states :Any=False ) -> Any:
        '''simple docstring'''
        with torch.no_grad():
            model = MaskaFormerModel(config=config )
            model.to(torch_device )
            model.eval()
            output = model(pixel_values=pixel_values , pixel_mask=pixel_mask )
            output = model(pixel_values , output_hidden_states=True )
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
        self.parent.assertTrue(output.encoder_last_hidden_state is not None )
        if output_hidden_states:
            self.check_output_hidden_state(output , config )
    def create_and_check_maskaformer_instance_segmentation_head_model( self :Optional[int] , config :str , pixel_values :Optional[Any] , pixel_mask :Tuple , mask_labels :List[str] , class_labels :int ) -> Optional[int]:
        '''simple docstring'''
        model = MaskaFormerForUniversalSegmentation(config=config )
        model.to(torch_device )
        model.eval()
        def comm_check_on_output(result :List[str] ):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.encoder_last_hidden_state is not None )
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
        with torch.no_grad():
            result = model(pixel_values=pixel_values , pixel_mask=pixel_mask )
            result = model(pixel_values )
            comm_check_on_output(result )
        result = model(
            pixel_values=pixel_values , pixel_mask=pixel_mask , mask_labels=mask_labels , class_labels=class_labels )
        comm_check_on_output(result )
        self.parent.assertTrue(result.loss is not None )
        self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class A_ (ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {'feature-extraction': MaskaFormerModel} if is_torch_available() else {}
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
a__ = False
    def setUp( self :Dict ) -> Any:
        '''simple docstring'''
        self.model_tester = MaskaFormerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MaskaFormerConfig , has_text_modality=False )
def _A ( self :int ) -> Any:
'''simple docstring'''
self.config_tester.run_common_tests()
def _A ( self :str ) -> List[Any]:
'''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config , **inputs_dict , output_hidden_states=False )
def _A ( self :Optional[int] ) -> str:
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*config_and_inputs )
@unittest.skip(reason="Mask2Former does not use inputs_embeds" )
def _A ( self :List[Any] ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former does not have a get_input_embeddings method" )
def _A ( self :str ) -> Dict:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former is not a generative model" )
def _A ( self :Dict ) -> int:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former does not use token embeddings" )
def _A ( self :List[str] ) -> List[Any]:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def _A ( self :int ) -> Any:
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _A ( self :Dict ) -> Tuple:
'''simple docstring'''
pass
def _A ( self :int ) -> List[Any]:
'''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
@slow
def _A ( self :List[Any] ) -> Union[str, Any]:
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = MaskaFormerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def _A ( self :Dict ) -> int:
'''simple docstring'''
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size) , device=torch_device ),
            "mask_labels": torch.randn((2, 10, *size) , device=torch_device ),
            "class_labels": torch.zeros(2 , 10 , device=torch_device ).long(),
        }
        config = self.model_tester.get_config()
        model = MaskaFormerForUniversalSegmentation(config ).to(torch_device )
        outputs = model(**inputs )
        self.assertTrue(outputs.loss is not None )
def _A ( self :str ) -> Tuple:
'''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config , **inputs_dict , output_hidden_states=True )
def _A ( self :int ) -> Optional[int]:
'''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config ).to(torch_device )
            outputs = model(**inputs_dict , output_attentions=True )
            self.assertTrue(outputs.attentions is not None )
def _A ( self :List[Any] ) -> Union[str, Any]:
'''simple docstring'''
        if not self.model_tester.is_training:
            return
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        model = model_class(config )
        model.to(torch_device )
        model.train()
        loss = model(pixel_values , mask_labels=mask_labels , class_labels=class_labels ).loss
        loss.backward()
def _A ( self :Union[str, Any] ) -> Any:
'''simple docstring'''
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True
        model = model_class(config ).to(torch_device )
        model.train()
        outputs = model(pixel_values , mask_labels=mask_labels , class_labels=class_labels )
        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        attentions = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=True )
        self.assertIsNotNone(encoder_hidden_states.grad )
        self.assertIsNotNone(pixel_decoder_hidden_states.grad )
        self.assertIsNotNone(transformer_decoder_hidden_states.grad )
        self.assertIsNotNone(attentions.grad )
TOLERANCE = 1E-4
def prepare_img()-> Union[str, Any]:
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_vision
@slow
class A_ (unittest.TestCase ):
"""simple docstring"""
@cached_property
    def model_checkpoints( self :Union[str, Any] ) -> str:
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
    def default_image_processor( self :List[Any] ) -> Optional[Any]:
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
    def test_inference_no_head( self :List[str] ) -> int:
        '''simple docstring'''
        model = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image , return_tensors="pt" ).to(torch_device )
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(inputs_shape , (1, 3, 384, 384) )
        with torch.no_grad():
            outputs = model(**inputs )
        expected_slice_hidden_state = torch.tensor(
            [[-0.2_7_9_0, -1.0_7_1_7, -1.1_6_6_8], [-0.5_1_2_8, -0.3_1_2_8, -0.4_9_8_7], [-0.5_8_3_2, 0.1_9_7_1, -0.0_1_9_7]] ).to(torch_device )
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3] , expected_slice_hidden_state , atol=TOLERANCE ) )
        expected_slice_hidden_state = torch.tensor(
            [[0.8_9_7_3, 1.1_8_4_7, 1.1_7_7_6], [1.1_9_3_4, 1.5_0_4_0, 1.5_1_2_8], [1.1_1_5_3, 1.4_4_8_6, 1.4_9_5_1]] ).to(torch_device )
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , expected_slice_hidden_state , atol=TOLERANCE ) )
        expected_slice_hidden_state = torch.tensor(
            [[2.1_1_5_2, 1.7_0_0_0, -0.8_6_0_3], [1.5_8_0_8, 1.8_0_0_4, -0.9_3_5_3], [1.6_0_4_3, 1.7_4_9_5, -0.5_9_9_9]] ).to(torch_device )
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3] , expected_slice_hidden_state , atol=TOLERANCE ) )
    def test_inference_universal_segmentation_head( self :Union[str, Any] ) -> List[str]:
        '''simple docstring'''
        model = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(torch_device ).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image , return_tensors="pt" ).to(torch_device )
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(inputs_shape , (1, 3, 384, 384) )
        with torch.no_grad():
            outputs = model(**inputs )
        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
        expected_slice = [
            [-8.7_8_3_9, -9.0_0_5_6, -8.8_1_2_1],
            [-7.4_1_0_4, -7.0_3_1_3, -6.5_4_0_1],
            [-6.6_1_0_5, -6.3_4_2_7, -6.4_6_7_5],
        ]
        expected_slice = torch.tensor(expected_slice ).to(torch_device )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , expected_slice , atol=TOLERANCE ) )
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
        expected_slice = torch.tensor(
            [
                [1.8_3_2_4, -8.0_8_3_5, -4.1_9_2_2],
                [0.8_4_5_0, -9.0_0_5_0, -3.6_0_5_3],
                [0.3_0_4_5, -7.7_2_9_3, -3.0_2_7_5],
            ] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , expected_slice , atol=TOLERANCE ) )
    def test_with_segmentation_maps_and_loss( self :Union[str, Any] ) -> Any:
        '''simple docstring'''
        model = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(torch_device ).eval()
        image_processor = self.default_image_processor
        inputs = image_processor(
            [np.zeros((3, 800, 1_333) ), np.zeros((3, 800, 1_333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.float32 ), np.zeros((384, 384) ).astype(np.float32 )] , return_tensors="pt" , )
        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device )
        inputs["mask_labels"] = [el.to(torch_device ) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device ) for el in inputs["class_labels"]]
        with torch.no_grad():
            outputs = model(**inputs )
        self.assertTrue(outputs.loss is not None )
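
# End-to-end inference sketch (illustrative, mirroring the integration tests
# above with the public Mask2Former API names):
#
#   from transformers import Mask2FormerImageProcessor, Mask2FormerForUniversalSegmentation
#   processor = Mask2FormerImageProcessor.from_pretrained("facebook/mask2former-swin-small-coco-instance")
#   model = Mask2FormerForUniversalSegmentation.from_pretrained("facebook/mask2former-swin-small-coco-instance")
#   inputs = processor(images=prepare_img(), return_tensors="pt")
#   with torch.no_grad():
#       outputs = model(**inputs)
#   segmentation = processor.post_process_instance_segmentation(outputs)[0]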
| 701 |
'''simple docstring'''
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph :
"""simple docstring"""
def __init__( self :Dict ) -> List[str]:
'''simple docstring'''
snake_case_ : int = {}
    def add_pair( self :Any , u :Optional[Any] , v :Tuple , w :Optional[Any]=1 ) -> Any:
        '''simple docstring'''
        if self.graph.get(u ):
            if self.graph[u].count([w, v] ) == 0:
                self.graph[u].append([w, v] )
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v ):
            self.graph[v] = []
    def all_nodes( self :List[Any] ) -> Optional[int]:
        '''simple docstring'''
        return list(self.graph )
    def remove_pair( self :str , u :Any , v :int ) -> List[Any]:
        '''simple docstring'''
        if self.graph.get(u ):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_ )
    def dfs( self :List[str] , s :Optional[Any]=-2 , d :str=-1 ) -> str:
        '''simple docstring'''
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph )[0]
        stack.append(s )
        visited.append(s )
        ss = s
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s] ) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1] ) < 1:
                        if node[1] == d:
                            visited.append(d )
                            return visited
                        else:
                            stack.append(node[1] )
                            visited.append(node[1] )
                            ss = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack ) != 0:
                    s = stack[len(stack ) - 1]
            else:
                s = ss
            # check if we have reached the starting point
            if len(stack ) == 0:
                return visited
    def fill_graph( self :Tuple , c :int=-1 ) -> int:
        '''simple docstring'''
        if c == -1:
            c = floor(random() * 10_000 ) + 10
        for i in range(c ):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102 ) + 1 ):
                n = floor(random() * c ) + 1
                if n != i:
                    self.add_pair(i , n , 1 )
    def bfs( self :Tuple , s :Dict=-2 ) -> Dict:
        '''simple docstring'''
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph )[0]
        d.append(s )
        visited.append(s )
        while d:
            s = d.popleft()
            if len(self.graph[s] ) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1] ) < 1:
                        d.append(node[1] )
                        visited.append(node[1] )
        return visited
    def in_degree( self :List[str] , u :str ) -> Union[str, Any]:
        '''simple docstring'''
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count
    def out_degree( self :Any , u :int ) -> Optional[Any]:
        '''simple docstring'''
        return len(self.graph[u] )
def _A ( self :Tuple , lowerCAmelCase__ :List[str]=-2 ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : str = []
snake_case_ : str = []
if s == -2:
snake_case_ : Optional[Any] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : int = s
snake_case_ : Optional[int] = []
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : List[Any] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : List[str] = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(lowerCAmelCase__ ) != 0:
snake_case_ : int = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Union[str, Any] = ss
# check if se have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return sorted_nodes
def _A ( self :Dict ) -> Any:
'''simple docstring'''
snake_case_ : Dict = []
snake_case_ : Any = []
snake_case_ : str = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Optional[int] = -2
snake_case_ : Any = []
snake_case_ : List[Any] = s
snake_case_ : int = False
snake_case_ : Optional[int] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : List[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case_ : Any = len(lowerCAmelCase__ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : Optional[int] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ : Optional[Any] = True
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Optional[Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : str = False
indirect_parents.append(lowerCAmelCase__ )
snake_case_ : List[str] = s
snake_case_ : Optional[int] = ss
# check if se have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return list(lowerCAmelCase__ )
def _A ( self :Tuple ) -> List[str]:
'''simple docstring'''
snake_case_ : List[Any] = []
snake_case_ : Tuple = []
snake_case_ : List[str] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : str = -2
snake_case_ : List[str] = []
snake_case_ : List[Any] = s
snake_case_ : List[str] = False
snake_case_ : Dict = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : List[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case_ : Any = len(lowerCAmelCase__ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ : Tuple = True
if len(lowerCAmelCase__ ) != 0:
snake_case_ : List[Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Optional[int] = False
indirect_parents.append(lowerCAmelCase__ )
snake_case_ : int = s
snake_case_ : Union[str, Any] = ss
# check if se have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return False
    def dfs_time( self :Optional[int] , s :Optional[int]=-2 , e :Tuple=-1 ) -> str:
        '''simple docstring'''
        begin = time()
        self.dfs(s , e )
        end = time()
        return end - begin
    def bfs_time( self :Any , s :Tuple=-2 ) -> Optional[Any]:
        '''simple docstring'''
        begin = time()
        self.bfs(s )
        end = time()
        return end - begin
class Graph :
"""simple docstring"""
def __init__( self :Tuple ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[Any] = {}
    def add_pair( self :str , u :Dict , v :List[Any] , w :Union[str, Any]=1 ) -> str:
        '''simple docstring'''
        if self.graph.get(u ):
            # if there already is a edge
            if self.graph[u].count([w, v] ) == 0:
                self.graph[u].append([w, v] )
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v ):
            # if there already is a edge
            if self.graph[v].count([w, u] ) == 0:
                self.graph[v].append([w, u] )
        else:
            # if v does not exist
            self.graph[v] = [[w, u]]
    def remove_pair( self :Dict , u :Tuple , v :Optional[Any] ) -> Optional[Any]:
        '''simple docstring'''
        if self.graph.get(u ):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_ )
        # the other way round
        if self.graph.get(v ):
            for _ in self.graph[v]:
                if _[1] == u:
                    self.graph[v].remove(_ )
    def dfs( self :Optional[Any] , s :Optional[Any]=-2 , d :Optional[int]=-1 ) -> int:
        '''simple docstring'''
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph )[0]
        stack.append(s )
        visited.append(s )
        ss = s
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s] ) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1] ) < 1:
                        if node[1] == d:
                            visited.append(d )
                            return visited
                        else:
                            stack.append(node[1] )
                            visited.append(node[1] )
                            ss = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack ) != 0:
                    s = stack[len(stack ) - 1]
            else:
                s = ss
            # check if we have reached the starting point
            if len(stack ) == 0:
                return visited
    def fill_graph( self :Optional[int] , c :str=-1 ) -> List[Any]:
        '''simple docstring'''
        if c == -1:
            c = floor(random() * 10_000 ) + 10
        for i in range(c ):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102 ) + 1 ):
                n = floor(random() * c ) + 1
                if n != i:
                    self.add_pair(i , n , 1 )
    def bfs( self :Any , s :Optional[Any]=-2 ) -> List[Any]:
        '''simple docstring'''
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph )[0]
        d.append(s )
        visited.append(s )
        while d:
            s = d.popleft()
            if len(self.graph[s] ) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1] ) < 1:
                        d.append(node[1] )
                        visited.append(node[1] )
        return visited
    def degree( self :str , u :Union[str, Any] ) -> Optional[Any]:
        '''simple docstring'''
        return len(self.graph[u] )
def _A ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
snake_case_ : Any = []
snake_case_ : Optional[Any] = []
snake_case_ : Optional[Any] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Tuple = -2
snake_case_ : Optional[int] = []
snake_case_ : Tuple = s
snake_case_ : Optional[Any] = False
snake_case_ : Optional[int] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : Optional[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case_ : Optional[int] = len(lowerCAmelCase__ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : Tuple = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ : Optional[int] = True
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Optional[Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Optional[int] = False
indirect_parents.append(lowerCAmelCase__ )
snake_case_ : List[Any] = s
snake_case_ : Dict = ss
# check if se have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return list(lowerCAmelCase__ )
def _A ( self :Optional[Any] ) -> Tuple:
'''simple docstring'''
snake_case_ : Optional[Any] = []
snake_case_ : int = []
snake_case_ : List[str] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Tuple = -2
snake_case_ : int = []
snake_case_ : int = s
snake_case_ : Optional[Any] = False
snake_case_ : List[Any] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : Union[str, Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case_ : Tuple = len(lowerCAmelCase__ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : Optional[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ : Optional[Any] = True
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Tuple = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Optional[int] = False
indirect_parents.append(lowerCAmelCase__ )
snake_case_ : Union[str, Any] = s
snake_case_ : Tuple = ss
# check if se have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return False
    def all_nodes( self :Any ) -> Tuple:
        '''simple docstring'''
        return list(self.graph )
    def dfs_time( self :Optional[Any] , s :Tuple=-2 , e :Optional[int]=-1 ) -> str:
        '''simple docstring'''
        begin = time()
        self.dfs(s , e )
        end = time()
        return end - begin
    def bfs_time( self :Union[str, Any] , s :List[Any]=-2 ) -> int:
        '''simple docstring'''
        begin = time()
        self.bfs(s )
        end = time()
        return end - begin
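
# Usage sketch (an illustrative addition): build a small directed graph by
# hand and exercise traversal and degree queries.
if __name__ == "__main__":
    dg = DirectedGraph()
    dg.add_pair(1, 2)
    dg.add_pair(1, 3)
    dg.add_pair(2, 4)
    dg.add_pair(3, 4)
    print(dg.dfs(1, 4))      # a DFS path from 1 to 4, e.g. [1, 2, 4]
    print(dg.bfs(1))         # BFS order from 1
    print(dg.in_degree(4))   # -> 2
    print(dg.out_degree(1))  # -> 2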
| 656 | 0 |
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class A_ (TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self :List[Any] ) -> int:
        '''simple docstring'''
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
def _A ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Any = "<pad>"
snake_case_ : Optional[int] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__ ) , lowerCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__ ) , lowerCAmelCase__ )
def _A ( self :List[Any] ) -> Union[str, Any]:
'''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "<s>" )
        self.assertEqual(vocab_keys[1] , "<pad>" )
        self.assertEqual(len(vocab_keys ) , 1_008 )
def _A ( self :str ) -> Tuple:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1_008 )
def _A ( self :Tuple ) -> Dict:
'''simple docstring'''
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize("This is a test" )
        self.assertListEqual(tokens , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
    def big_tokenizer( self :Optional[int] ) -> int:
'''simple docstring'''
return XGLMTokenizer.from_pretrained("facebook/xglm-564M" )
def _A ( self :str ) -> Optional[int]:
'''simple docstring'''
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB , f.name )
            tokenizer = XGLMTokenizer(f.name , keep_accents=True )
            pickled_tokenizer = pickle.dumps(tokenizer )
        pickle.loads(pickled_tokenizer )
def _A ( self :List[str] ) -> int:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
@slow
def _A ( self :List[Any] ) -> List[str]:
'''simple docstring'''
snake_case_ : List[str] = "Hello World!"
snake_case_ : Tuple = [2, 31_227, 4_447, 35]
self.assertListEqual(lowerCAmelCase__ , self.big_tokenizer.encode(lowerCAmelCase__ ) )
@slow
def _A ( self :Any ) -> Optional[Any]:
'''simple docstring'''
        src_text = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
        )
        # fmt: off
        expected_token_ids = [2, 1_018, 67, 11, 1_988, 2_617, 5_631, 278, 11, 3_407, 48, 71_630, 28_085, 4, 3_234, 157, 13, 6, 5, 6, 4, 3_526, 768, 15, 659, 57, 298, 3_983, 864, 129, 21, 6, 5, 13_675, 377, 652, 7_580, 10_341, 155, 2_817, 422, 1_666, 7, 1_674, 53, 113, 202_277, 17_892, 33, 60, 87, 4, 3_234, 157, 61, 2_667, 52_376, 19, 88, 23, 735]
        # fmt: on
        self.assertListEqual(expected_token_ids , self.big_tokenizer.encode(src_text ) )
@slow
def _A ( self :Optional[int] ) -> str:
'''simple docstring'''
        # fmt: off
        expected_encoding = {
"input_ids": [[2, 108_825, 1_163, 15, 88_010, 473, 15_898, 157, 13_672, 1_857, 312, 8, 238_021, 1_163, 53, 13_672, 1_857, 312, 8, 53_283, 182_396, 8, 18_566, 16, 36_733, 4_101, 8, 230, 244_017, 122_553, 7, 15, 132_597, 4, 293, 12_511, 7_610, 4, 3_414, 132_597, 9, 4, 32_361, 362, 4, 734, 28_512, 32_569, 18, 4, 32_361, 26_096, 14_982, 73, 18_715, 21_433, 235_261, 15, 492, 12_427, 16, 53, 18_715, 21_433, 65_454, 15, 23_659, 563, 16, 278, 597, 2_843, 595, 7_931, 182_396, 64_186, 22, 886, 595, 132_981, 53, 25_540, 3_449, 43_982, 39_901, 5_951, 878, 330, 4, 27_694, 80_269, 312, 53, 6_517, 11_780, 611, 20_408, 5], [2, 6, 132_597, 67, 42_897, 33, 592, 8, 163_729, 25_540, 361, 136_997, 109_514, 173_230, 7, 501, 60, 102_913, 196, 5_631, 235, 63_243, 473, 6, 231_757, 74, 5_277, 7_905, 53, 3_095, 37_317, 22, 454, 183_874, 5], [2, 268, 31_298, 46_530, 6, 132_935, 43_831, 7, 597, 32, 24, 3_688, 9_865, 5]],
"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name="facebook/xglm-564M" , padding=False , )
| 702 |
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r'''\s+''')


def get_hash(example):
    """Get the hash of the whitespace-normalized content field."""
    return {"hash": hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()}
def line_stats(example):
    """Compute mean and max line length of the content."""
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}
def alpha_stats(example):
    """Compute the fraction of alphanumeric characters in the content."""
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}
def check_uniques(example, uniques):
    """Check if the current hash is still in the set of unique hashes; remove it if so."""
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False
def is_autogenerated(example, scan_width=5):
    """Check if the file is autogenerated by looking at the first few lines."""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    return {"autogenerated": False}
def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Check if the file is a configuration file or a unit test."""
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test: look for keywords in the first few lines
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test: count occurrences of "config"/"test" relative to the number of lines
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}
def has_no_keywords(example):
    """Check if the file has none of the keywords for: function, class, for loop, while loop."""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}
def has_few_assignments(example, minimum=4):
    """Check if the file uses the symbol '=' fewer than `minimum` times."""
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}
def char_token_ratio(example):
    """Compute the character/token ratio of the file with the tokenizer."""
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}
def preprocess(example):
    """Chain all preprocessing steps into one function to not fill the cache."""
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results
def filter(example, uniques, args):
    """Filter the dataset with heuristics; only the first occurrence of a hash is kept."""
    if not check_uniques(example, uniques):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def compress_file(file_path):
    """Compress a file with gzip and remove the original."""
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split='''train''')
print(f'''Time to load dataset: {time.time()-t_start:.2f}''')
# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f'''Time to preprocess dataset: {time.time()-t_start:.2f}''')
# Deduplicate hashes
uniques = set(ds.unique('''hash'''))
frac = len(uniques) / len(ds)
print(f'''Fraction of duplicates: {1-frac:.2%}''')
# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args})
print(f'''Time to filter dataset: {time.time()-t_start:.2f}''')
print(f'''Size of filtered dataset: {len(ds_filter)}''')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(f'''Time to deduplicate dataset: {time.time()-t_start:.2f}''')
print(f'''Size of deduplicate dataset: {len(ds_filter)}''')
# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
with open(output_dir / '''duplicate_clusters.json''', '''w''') as f:
json.dump(duplicate_clusters, f)
data_dir = output_dir / '''data'''
data_dir.mkdir(exist_ok=True)
t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f'''file-{file_number+1:012}.json''')
    end_index = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(f'''Time to save dataset: {time.time()-t_start:.2f}''')
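# Illustrative sketch (added; not part of the original pipeline): the exact
# deduplication above hashes whitespace-normalized content, collects the set of
# unique hashes, and lets `check_uniques` pop each hash on first sight so every
# later duplicate is dropped. Uncommented, this stand-alone toy run shows it:
#
#   import hashlib, re
#
#   def toy_hash(text):
#       return hashlib.md5(re.sub(r"\s+", "", text).encode("utf-8")).hexdigest()
#
#   samples = ["a = 1", "a  =  1", "b = 2"]  # first two collide after normalization
#   hashes = [toy_hash(s) for s in samples]
#   unique_hashes = set(hashes)
#   kept = []
#   for s, h in zip(samples, hashes):
#       if h in unique_hashes:        # first occurrence wins
#           unique_hashes.remove(h)
#           kept.append(s)
#   assert kept == ["a = 1", "b = 2"]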
| 656 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowerCamelCase : int = logging.get_logger(__name__)
__lowerCamelCase : List[Any] = {
"""microsoft/swin-tiny-patch4-window7-224""": (
"""https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"""
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class A_ (BackboneConfigMixin , PretrainedConfig ):
    """simple docstring"""

    model_type = '''swin'''

    attribute_map = {
        '''num_attention_heads''': '''num_heads''',
        '''num_hidden_layers''': '''num_layers''',
    }
    def __init__( self , image_size=224 , patch_size=4 , num_channels=3 , embed_dim=96 , depths=[2, 2, 6, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , mlp_ratio=4.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.0_2 , layer_norm_eps=1E-5 , encoder_stride=32 , out_features=None , out_indices=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.stage_names = ["stem"] + [F'''stage{idx}''' for idx in range(1 , len(depths ) + 1 )]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
class A_ (OnnxConfig ):
    """simple docstring"""

    torch_onnx_minimum_version = version.parse('''1.11''' )

    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )

    @property
    def atol_for_validation( self ) -> float:
        '''simple docstring'''
        return 1E-4
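# Hedged usage sketch (added for illustration; the `demo_*` names are hypothetical).
# It shows how the derived `hidden_size` above is obtained: the channel dimension
# doubles at every stage, so hidden_size = embed_dim * 2 ** (num_stages - 1).
if __name__ == "__main__":
    demo_embed_dim = 96
    demo_depths = [2, 2, 6, 2]  # four stages
    demo_hidden_size = int(demo_embed_dim * 2 ** (len(demo_depths) - 1))
    print(demo_hidden_size)  # 96 * 2**3 == 768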
| 703 |
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class A_ (unittest.TestCase ):
"""simple docstring"""
    def test_accelerated_optimizer_pickling(self ):
        '''simple docstring'''
        model = torch.nn.Linear(10 , 10 )
        optimizer = torch.optim.SGD(model.parameters() , 0.1 )
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer )
        try:
            pickle.loads(pickle.dumps(optimizer ) )
        except Exception as e:
            self.fail(F'''Accelerated optimizer pickling failed with {e}''' )
AcceleratorState._reset_state()
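# Minimal baseline sketch (added; assumes only `torch` is installed): the test
# above guards that an optimizer wrapped by `accelerator.prepare` stays picklable,
# matching the behavior of a plain, unwrapped optimizer:
if __name__ == "__main__":
    model = torch.nn.Linear(10, 10)
    optimizer = torch.optim.SGD(model.parameters(), 0.1)
    restored = pickle.loads(pickle.dumps(optimizer))
    print(type(restored).__name__)  # SGD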
| 656 | 0 |
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class A_ (unittest.TestCase ):
"""simple docstring"""
@require_torch
    def test_small_model_pt(self ):
        '''simple docstring'''
        audio_classifier = pipeline(
            task="zero-shot-audio-classification" , model="hf-internal-testing/tiny-clap-htsat-unfused" )
        dataset = load_dataset("ashraq/esc50" )
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
        self.assertEqual(
            nested_simplify(output ) , [{"score": 0.5_0_1, "label": "Sound of a dog"}, {"score": 0.4_9_9, "label": "Sound of vaccum cleaner"}] , )
@unittest.skip("No models are available in TF" )
    def test_small_model_tf(self ):
'''simple docstring'''
pass
@slow
@require_torch
    def test_large_model_pt(self ):
        '''simple docstring'''
        audio_classifier = pipeline(
            task="zero-shot-audio-classification" , model="laion/clap-htsat-unfused" , )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50" )
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
        self.assertEqual(
            nested_simplify(output ) , [
                {"score": 0.9_9_9, "label": "Sound of a dog"},
                {"score": 0.0_0_1, "label": "Sound of vaccum cleaner"},
            ] , )
        output = audio_classifier([audio] * 5 , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
        self.assertEqual(
            nested_simplify(output ) , [
                [
                    {"score": 0.9_9_9, "label": "Sound of a dog"},
                    {"score": 0.0_0_1, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5 , )
        output = audio_classifier(
            [audio] * 5 , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] , batch_size=5 )
        self.assertEqual(
            nested_simplify(output ) , [
                [
                    {"score": 0.9_9_9, "label": "Sound of a dog"},
                    {"score": 0.0_0_1, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5 , )
@unittest.skip("No models are available in TF" )
    def test_large_model_tf(self ):
'''simple docstring'''
pass
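# Hedged sketch (added for illustration; every number below is made up): CLAP-style
# zero-shot scores come from cosine similarity between one audio embedding and one
# text embedding per candidate label, normalized with a softmax so they sum to 1.
if __name__ == "__main__":
    import numpy as np

    audio_emb = np.array([0.2, 0.9, 0.1])
    label_embs = {
        "Sound of a dog": np.array([0.25, 0.85, 0.0]),
        "Sound of vaccum cleaner": np.array([0.9, 0.1, 0.3]),
    }
    sims = {
        label: float(audio_emb @ emb / (np.linalg.norm(audio_emb) * np.linalg.norm(emb)))
        for label, emb in label_embs.items()
    }
    total = sum(np.exp(s) for s in sims.values())
    print({label: float(np.exp(s)) / total for label, s in sims.items()})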
| 704 |
'''simple docstring'''
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = '''src/transformers'''
# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(R'''\[(.+?)\]\((https://huggingface\.co/.+?)\)''')

CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
'''DecisionTransformerConfig''',
'''EncoderDecoderConfig''',
'''MusicgenConfig''',
'''RagConfig''',
'''SpeechEncoderDecoderConfig''',
'''TimmBackboneConfig''',
'''VisionEncoderDecoderConfig''',
'''VisionTextDualEncoderConfig''',
'''LlamaConfig''',
}
def get_checkpoint_from_config_class(config_class):
    """Return the first checkpoint name whose link matches `https://huggingface.co/<name>`."""
    checkpoint = None
    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)
    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]
        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = F'''https://huggingface.co/{ckpt_name}'''
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break
    return checkpoint
def check_config_docstrings_have_checkpoints():
    """Collect config classes whose docstring lacks a valid checkpoint and raise if any are found."""
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values() ):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class )
        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name )
    if len(configs_without_checkpoint ) > 0:
        message = "\n".join(sorted(configs_without_checkpoint ) )
        raise ValueError(F'''The following configurations don\'t contain any valid checkpoint:\n{message}''' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
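# Illustration (added; not part of the check): the `_re_checkpoint` pattern extracts
# (name, link) pairs from markdown-style links in a config docstring, e.g.:
#
#   docstring = "[bert-base-uncased](https://huggingface.co/bert-base-uncased)"
#   _re_checkpoint.findall(docstring)
#   # -> [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]
#
# The loop in `get_checkpoint_from_config_class` then keeps a name only when its
# link is exactly `https://huggingface.co/<name>` (after stripping a trailing slash).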
| 656 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'''configuration_wavlm''': ['''WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WavLMConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_wavlm'''] = [
'''WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WavLMForAudioFrameClassification''',
'''WavLMForCTC''',
'''WavLMForSequenceClassification''',
'''WavLMForXVector''',
'''WavLMModel''',
'''WavLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
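# Hedged sketch of the mechanism (added; illustration only): `_LazyModule` defers
# heavy imports until attribute access, conceptually like a PEP 562 module-level
# `__getattr__`. A minimal hand-rolled equivalent for a single symbol:
#
#   import importlib
#
#   def __getattr__(name):
#       if name == "WavLMModel":
#           module = importlib.import_module(".modeling_wavlm", __name__)
#           return module.WavLMModel
#       raise AttributeError(f"module {__name__!r} has no attribute {name!r}")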
| 705 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : List[str] = logging.get_logger(__name__)
__lowerCamelCase : int = {
'''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class A_ (PretrainedConfig ):
    """simple docstring"""

    model_type = '''cvt'''
    def __init__( self , num_channels=3 , patch_sizes=[7, 3, 3] , patch_stride=[4, 2, 2] , patch_padding=[2, 1, 1] , embed_dim=[64, 192, 384] , num_heads=[1, 3, 6] , depth=[1, 2, 10] , mlp_ratio=[4.0, 4.0, 4.0] , attention_drop_rate=[0.0, 0.0, 0.0] , drop_rate=[0.0, 0.0, 0.0] , drop_path_rate=[0.0, 0.0, 0.1] , qkv_bias=[True, True, True] , cls_token=[False, False, True] , qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"] , kernel_qkv=[3, 3, 3] , padding_kv=[1, 1, 1] , stride_kv=[2, 2, 2] , padding_q=[1, 1, 1] , stride_q=[1, 1, 1] , initializer_range=0.0_2 , layer_norm_eps=1E-1_2 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
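# Hedged consistency sketch (added; illustration only, not upstream code): CvT is a
# three-stage model, so every per-stage hyperparameter above is a list of length 3.
if __name__ == "__main__":
    demo_stage_lists = {
        "patch_sizes": [7, 3, 3],
        "embed_dim": [64, 192, 384],
        "num_heads": [1, 3, 6],
        "depth": [1, 2, 10],
    }
    lengths = {len(v) for v in demo_stage_lists.values()}
    assert lengths == {3}, f"inconsistent per-stage lists: {demo_stage_lists}"
    print(f"all per-stage lists have length {lengths.pop()}")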
| 656 | 0 |
'''simple docstring'''
from __future__ import annotations
import queue
class TreeNode :
"""simple docstring"""
    def __init__( self , data ) -> None:
        '''simple docstring'''
        self.data = data
        self.left = None
        self.right = None
def build_tree() -> TreeNode:
    """simple docstring"""
    print("\n********Press N to stop entering at any point of time********\n" )
    check = input("Enter the value of the root node: " ).strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check ) )
    q.put(tree_node )
    while not q.empty():
        node_found = q.get()
        msg = F'''Enter the left node of {node_found.data}: '''
        check = input(msg ).strip().lower() or '''n'''
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check ) )
        node_found.left = left_node
        q.put(left_node )
        msg = F'''Enter the right node of {node_found.data}: '''
        check = input(msg ).strip().lower() or '''n'''
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check ) )
        node_found.right = right_node
        q.put(right_node )
    raise
def pre_order( node ) -> None:
    """simple docstring"""
    if not isinstance(node , TreeNode ) or not node:
return
print(node.data ,end="," )
pre_order(node.left )
pre_order(node.right )
def in_order( node ) -> None:
    """simple docstring"""
    if not isinstance(node , TreeNode ) or not node:
return
in_order(node.left )
print(node.data ,end="," )
in_order(node.right )
def post_order( node ) -> None:
    """simple docstring"""
    if not isinstance(node , TreeNode ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data ,end="," )
def level_order( node ) -> None:
    """simple docstring"""
    if not isinstance(node , TreeNode ) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node )
    while not q.empty():
        node_dequeued = q.get()
print(node_dequeued.data ,end="," )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def level_order_actual( node ) -> None:
    """simple docstring"""
    if not isinstance(node , TreeNode ) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node )
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
print(node_dequeued.data ,end="," )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
            q.put(node )
def pre_order_iter( node ) -> None:
    """simple docstring"""
    if not isinstance(node , TreeNode ) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data , end="," )
            stack.append(n )
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right
def in_order_iter( node ) -> None:
    """simple docstring"""
    if not isinstance(node , TreeNode ) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n )
            n = n.left
        n = stack.pop()
        print(n.data , end="," )
        n = n.right
def post_order_iter( node ) -> None:
    """simple docstring"""
    if not isinstance(node , TreeNode ) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n )
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left )
        if n.right:
            stack1.append(n.right )
        stack2.append(n )
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data , end="," )
def prompt( s = "" , width=50 , char="*" ) -> str:
    """simple docstring"""
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s ) - 2 , 2 )
    return F'''{left * char} {s} {(left + extra) * char}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt('''Binary Tree Traversals'''))
node = build_tree()
print(prompt('''Pre Order Traversal'''))
pre_order(node)
print(prompt() + '''\n''')
print(prompt('''In Order Traversal'''))
in_order(node)
print(prompt() + '''\n''')
print(prompt('''Post Order Traversal'''))
post_order(node)
print(prompt() + '''\n''')
print(prompt('''Level Order Traversal'''))
level_order(node)
print(prompt() + '''\n''')
print(prompt('''Actual Level Order Traversal'''))
level_order_actual(node)
print('''*''' * 50 + '''\n''')
print(prompt('''Pre Order Traversal - Iteration Version'''))
pre_order_iter(node)
print(prompt() + '''\n''')
print(prompt('''In Order Traversal - Iteration Version'''))
in_order_iter(node)
print(prompt() + '''\n''')
print(prompt('''Post Order Traversal - Iteration Version'''))
post_order_iter(node)
print(prompt())
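# Illustration (added): `build_tree` above is interactive; for quick experiments a
# fixed tree can be wired up directly and handed to any traversal, e.g.:
#
#   root = TreeNode(1)
#   root.left, root.right = TreeNode(2), TreeNode(3)
#   root.left.left, root.left.right = TreeNode(4), TreeNode(5)
#   in_order(root)     # prints 4,2,5,1,3,
#   level_order(root)  # prints 1,2,3,4,5,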
| 706 |
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_CITATION = '''\
@inproceedings{snover-etal-2006-study,
title = "A Study of Translation Edit Rate with Targeted Human Annotation",
author = "Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John",
booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
month = aug # " 8-12",
year = "2006",
address = "Cambridge, Massachusetts, USA",
publisher = "Association for Machine Translation in the Americas",
url = "https://aclanthology.org/2006.amta-papers.25",
pages = "223--231",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
_DESCRIPTION = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''
_KWARGS_DESCRIPTION = '''
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
\'score\' (float): TER score (num_edits / sum_ref_lengths * 100)
\'num_edits\' (int): The cumulative number of edits
\'ref_length\' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}
Example 2:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}
Example 3:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}
Example 4:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}
Example 5:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ (datasets.Metric ):
"""simple docstring"""
    def _info( self ):
'''simple docstring'''
if version.parse(scb.__version__ ) < version.parse("1.4.12" ):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
"You can install it with `pip install \"sacrebleu>=1.4.12\"`." )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="http://www.cs.umd.edu/~snover/tercom/" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"] , reference_urls=[
"https://github.com/jhclark/tercom",
] , )
    def _compute( self , predictions , references , normalized : bool = False , ignore_punct : bool = False , support_zh_ja_chars : bool = False , case_sensitive : bool = False , ):
        '''simple docstring'''
        references_per_prediction = len(references[0] )
        if any(len(refs ) != references_per_prediction for refs in references ):
            raise ValueError("Sacrebleu requires the same number of references for each prediction" )
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction )]
        sb_ter = TER(
            normalized=normalized , no_punct=ignore_punct , asian_support=support_zh_ja_chars , case_sensitive=case_sensitive , )
        output = sb_ter.corpus_score(predictions , transformed_references )
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
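# Hedged sketch (added for illustration; real TER also counts phrase shifts, which
# this toy helper omits): the score divides word-level edit operations by the
# reference length. A plain Levenshtein distance over token lists:
def _toy_word_edit_distance(hyp, ref):
    """Dynamic-programming edit distance between two token lists (toy helper)."""
    m, n = len(hyp), len(ref)
    dp = [[0] * (n + 1) for _ in range(m + 1)]
    for i in range(m + 1):
        dp[i][0] = i
    for j in range(n + 1):
        dp[0][j] = j
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            cost = 0 if hyp[i - 1] == ref[j - 1] else 1
            dp[i][j] = min(dp[i - 1][j] + 1, dp[i][j - 1] + 1, dp[i - 1][j - 1] + cost)
    return dp[m][n]


# e.g. _toy_word_edit_distance("a b c".split(), "a x c".split()) == 1,
# giving a toy TER of 1 / 3 * 100 ≈ 33.3 against this single reference.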
| 656 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_roformer''': ['''ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoFormerConfig''', '''RoFormerOnnxConfig'''],
'''tokenization_roformer''': ['''RoFormerTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_roformer_fast'''] = ['''RoFormerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_roformer'''] = [
'''ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RoFormerForCausalLM''',
'''RoFormerForMaskedLM''',
'''RoFormerForMultipleChoice''',
'''RoFormerForQuestionAnswering''',
'''RoFormerForSequenceClassification''',
'''RoFormerForTokenClassification''',
'''RoFormerLayer''',
'''RoFormerModel''',
'''RoFormerPreTrainedModel''',
'''load_tf_weights_in_roformer''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_roformer'''] = [
'''TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRoFormerForCausalLM''',
'''TFRoFormerForMaskedLM''',
'''TFRoFormerForMultipleChoice''',
'''TFRoFormerForQuestionAnswering''',
'''TFRoFormerForSequenceClassification''',
'''TFRoFormerForTokenClassification''',
'''TFRoFormerLayer''',
'''TFRoFormerModel''',
'''TFRoFormerPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_roformer'''] = [
'''FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxRoFormerForMaskedLM''',
'''FlaxRoFormerForMultipleChoice''',
'''FlaxRoFormerForQuestionAnswering''',
'''FlaxRoFormerForSequenceClassification''',
'''FlaxRoFormerForTokenClassification''',
'''FlaxRoFormerModel''',
'''FlaxRoFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 707 |
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset() -> Dataset:
    """simple docstring"""
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict )
    return dataset
class A_ (TestCase ):
"""simple docstring"""
    def test_make_duplicate_clusters(self ):
        '''simple docstring'''
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds , 0.8_5 )
        self.assertEqual(len(duplicate_clusters[0] ) , 2 )

    def test_deduplicate_dataset(self ):
        '''simple docstring'''
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds )
        self.assertEqual(len(ds_filter ) , 2 )
        print(duplicate_clusters )
        self.assertEqual(duplicate_clusters[0][0]["copies"] , 2 )
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"] , True )
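# Hedged sketch (added; illustration only): the 0.85 threshold above is a Jaccard
# similarity over tokens, which MinHash only approximates. Exact Jaccard on the toy
# rows makes the expected cluster obvious:
def _toy_jaccard(a: str, b: str) -> float:
    sa, sb = set(a.split()), set(b.split())
    return len(sa & sb) / len(sa | sb)


# _toy_jaccard("a " * 20, "a " * 30) == 1.0  -> clustered together as duplicates
# _toy_jaccard("a " * 20, "b " * 7) == 0.0   -> kept separate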
| 656 | 0 |
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
__lowerCamelCase : List[Any] = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class A_ (BertTokenizer ):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class A_ (BertTokenizer ):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
    '''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text''']
)
DPRReaderOutput = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits'''])
CUSTOM_DPR_READER_DOCSTRING = (
    r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. "
    r"Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
)
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class A_ :
"""simple docstring"""
    def __call__( self , questions , titles : Optional[str] = None , texts : Optional[str] = None , padding : Union[bool, str] = False , truncation : Union[bool, str] = False , max_length : Optional[int] = None , return_tensors : Optional[Union[str, TensorType]] = None , return_attention_mask : Optional[bool] = None , **kwargs , ) -> BatchEncoding:
        '''simple docstring'''
        if titles is None and texts is None:
            return super().__call__(
                questions , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions , text_pair , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        titles = titles if not isinstance(titles , str ) else [titles]
        texts = texts if not isinstance(texts , str ) else [texts]
        n_passages = len(titles )
        questions = questions if not isinstance(questions , str ) else [questions] * n_passages
        if len(titles ) != len(texts ):
            raise ValueError(
                F'''There should be as many titles than texts but got {len(titles )} titles and {len(texts )} texts.''' )
        encoded_question_and_titles = super().__call__(questions , titles , padding=False , truncation=False )['''input_ids''']
        encoded_texts = super().__call__(texts , add_special_tokens=False , padding=False , truncation=False )['''input_ids''']
        encoded_inputs = {
            '''input_ids''': [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles , encoded_texts )
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs , padding=padding , max_length=max_length , return_tensors=return_tensors )
    def decode_best_spans( self , reader_input : BatchEncoding , reader_output : DPRReaderOutput , num_spans : int = 16 , max_answer_length : int = 64 , num_spans_per_passage : int = 4 , ) -> List[DPRSpanPrediction]:
        '''simple docstring'''
        input_ids = reader_input['''input_ids''']
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits )
        sorted_docs = sorted(range(n_passages ) , reverse=True , key=relevance_logits.__getitem__ )
        nbest_spans_predictions: List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id] )
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id , 2 ) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id )
            else:
                sequence_len = len(sequence_ids )
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=max_answer_length , top_spans=num_spans_per_passage , )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=doc_id , start_index=start_index , end_index=end_index , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
                if len(nbest_spans_predictions ) >= num_spans:
                    break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans( self , start_logits : List[int] , end_logits : List[int] , max_answer_length : int , top_spans : int , ) -> List[DPRSpanPrediction]:
        '''simple docstring'''
        scores = []
        for start_index, start_score in enumerate(start_logits ):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
                scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        scores = sorted(scores , key=lambda x : x[1] , reverse=True )
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(F'''Wrong span indices: [{start_index}:{end_index}]''' )
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(F'''Span is too long: {length} > {max_answer_length}''' )
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals ):
                continue
            chosen_span_intervals.append((start_index, end_index) )
            if len(chosen_span_intervals ) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class A_ (A_ , BertTokenizer ):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ['''input_ids''', '''attention_mask''']
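# Hedged sketch (added for illustration) of the span scoring inside `_get_best_spans`:
# each candidate span is scored start_logit + end_logit, candidates are sorted by
# score, and overlapping spans are discarded greedily.
def _toy_best_spans(start_logits, end_logits, max_answer_length=3, top_spans=2):
    scores = []
    for s, s_score in enumerate(start_logits):
        for length, e_score in enumerate(end_logits[s : s + max_answer_length]):
            scores.append(((s, s + length), s_score + e_score))
    scores.sort(key=lambda x: x[1], reverse=True)
    chosen = []
    for (s, e), _ in scores:
        if any(ps <= s <= e <= pe or s <= ps <= pe <= e for ps, pe in chosen):
            continue
        chosen.append((s, e))
        if len(chosen) == top_spans:
            break
    return chosen


# _toy_best_spans([0.1, 2.0, 0.3], [0.2, 0.1, 1.5]) -> [(1, 2), (0, 0)]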
| 708 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
'''configuration_gpt_neox_japanese''': ['''GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXJapaneseConfig'''],
'''tokenization_gpt_neox_japanese''': ['''GPTNeoXJapaneseTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_gpt_neox_japanese'''] = [
'''GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXJapaneseForCausalLM''',
'''GPTNeoXJapaneseLayer''',
'''GPTNeoXJapaneseModel''',
'''GPTNeoXJapanesePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 656 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class A_ (unittest.TestCase ):
"""simple docstring"""
    def _get_uniform_logits(self , batch_size : int , length : int ):
        '''simple docstring'''
        scores = jnp.ones((batch_size, length) ) / length
        return scores
    def test_temperature_dist_warper(self ):
        '''simple docstring'''
        input_ids = None
        length = 20
        scores = self._get_uniform_logits(batch_size=2 , length=length )
        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1 )  # peak, 1st batch
        scores = scores.at[1, 10].set((1 / length) - 0.4 )  # valley, 1st batch
        # compute softmax
        probs = jax.nn.softmax(scores , axis=-1 )
        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5 )
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3 )
        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids , scores.copy() , cur_len=None ) , axis=-1 )
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids , scores.copy() , cur_len=None ) , axis=-1 )
        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) )
        self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) )
        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
        self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
        self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
    def test_top_k_dist_warper(self ):
        '''simple docstring'''
        input_ids = None
        vocab_size = 10
        batch_size = 2
        # create ramp distribution
        ramp_logits = np.broadcast_to(np.arange(vocab_size )[None, :] , (batch_size, vocab_size) ).copy()
        ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size
        top_k_warp = FlaxTopKLogitsWarper(3 )
        scores = top_k_warp(input_ids , ramp_logits , cur_len=None )
        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
        self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
        # check special case
        length = 5
        top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
        ramp_logits = np.broadcast_to(np.arange(length )[None, :] , (batch_size, length) ).copy()
        scores = top_k_warp_safety_check(input_ids , ramp_logits , cur_len=None )
        # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
        self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def _A ( self :List[str] ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[Any] = None
snake_case_ : List[str] = 10
snake_case_ : Tuple = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
snake_case_ : Optional[Any] = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.1_5, 0.3, 0.3, 0.2_5]] ) )
snake_case_ : int = FlaxTopPLogitsWarper(0.8 )
snake_case_ : Dict = np.exp(top_p_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
snake_case_ : Any = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.2_5]] )
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) )
# check edge cases with negative and extreme logits
snake_case_ : Dict = np.broadcast_to(np.arange(lowerCAmelCase__ )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
snake_case_ : Dict = ramp_logits[1] * 1_0_0.0
# make sure at least 2 tokens are kept
        top_p_warp = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
snake_case_ : Optional[Any] = top_p_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def _A ( self :int ) -> List[Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = 20
snake_case_ : Optional[int] = 4
snake_case_ : Dict = 0
        min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowerCAmelCase__ )
# check that min length is applied at length 5
snake_case_ : Dict = ids_tensor((batch_size, 20) , vocab_size=20 )
snake_case_ : Optional[int] = 5
snake_case_ : List[str] = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : List[Any] = min_dist_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float("inf" )] )
# check that min length is not applied anymore at length 15
snake_case_ : Optional[Any] = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : Dict = 15
snake_case_ : Any = min_dist_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ )
self.assertFalse(jnp.isinf(lowerCAmelCase__ ).any() )
def _A ( self :Optional[int] ) -> List[Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = 20
snake_case_ : Tuple = 4
snake_case_ : Dict = 0
        logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCAmelCase__ )
# check that all scores are -inf except the bos_token_id score
snake_case_ : List[str] = ids_tensor((batch_size, 1) , vocab_size=20 )
snake_case_ : Optional[int] = 1
snake_case_ : List[Any] = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : Tuple = logits_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] )  # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
snake_case_ : Tuple = 3
snake_case_ : List[Any] = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : str = logits_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ )
self.assertFalse(jnp.isinf(lowerCAmelCase__ ).any() )
def _A ( self :str ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Dict = 20
snake_case_ : Union[str, Any] = 4
snake_case_ : int = 0
snake_case_ : Union[str, Any] = 5
        logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ )
# check that all scores are -inf except the eos_token_id when max_length is reached
snake_case_ : List[str] = ids_tensor((batch_size, 4) , vocab_size=20 )
snake_case_ : List[str] = 4
snake_case_ : Tuple = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : Union[str, Any] = logits_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
snake_case_ : List[Any] = 3
snake_case_ : Any = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : Tuple = logits_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ )
self.assertFalse(jnp.isinf(lowerCAmelCase__ ).any() )
def _A ( self :Optional[int] ) -> int:
'''simple docstring'''
snake_case_ : Optional[Any] = 4
snake_case_ : Optional[Any] = 10
snake_case_ : Union[str, Any] = 15
snake_case_ : Union[str, Any] = 2
snake_case_ : Any = 1
snake_case_ : Any = 15
# dummy input_ids and scores
snake_case_ : str = ids_tensor((batch_size, sequence_length) , lowerCAmelCase__ )
snake_case_ : int = input_ids.copy()
snake_case_ : List[Any] = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : List[Any] = scores.copy()
# instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5 )
        top_k_warp = FlaxTopKLogitsWarper(3 )
        top_p_warp = FlaxTopPLogitsWarper(0.8 )
        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowerCAmelCase__ )
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCAmelCase__ )
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ )
snake_case_ : Optional[Any] = 10
# no processor list
snake_case_ : List[Any] = temp_dist_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ )
snake_case_ : List[Any] = top_k_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ )
snake_case_ : List[Any] = top_p_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ )
snake_case_ : Optional[Any] = min_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ )
snake_case_ : int = bos_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ )
snake_case_ : Optional[int] = eos_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ )
# with processor list
        processor = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
snake_case_ : Optional[int] = processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ )
# scores should be equal
self.assertTrue(jnp.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def _A ( self :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Optional[Any] = 4
snake_case_ : Any = 10
snake_case_ : Optional[Any] = 15
snake_case_ : int = 2
snake_case_ : List[Any] = 1
snake_case_ : Dict = 15
# dummy input_ids and scores
snake_case_ : List[str] = ids_tensor((batch_size, sequence_length) , lowerCAmelCase__ )
snake_case_ : List[str] = input_ids.copy()
snake_case_ : int = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : List[str] = scores.copy()
# instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5 )
        top_k_warp = FlaxTopKLogitsWarper(3 )
        top_p_warp = FlaxTopPLogitsWarper(0.8 )
        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowerCAmelCase__ )
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCAmelCase__ )
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ )
snake_case_ : List[Any] = 10
# no processor list
def run_no_processor_list(lowerCAmelCase__ :Dict , lowerCAmelCase__ :Any , lowerCAmelCase__ :Optional[Any] ):
snake_case_ : Union[str, Any] = temp_dist_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ )
snake_case_ : Optional[int] = top_k_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ )
snake_case_ : Optional[Any] = top_p_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ )
snake_case_ : Any = min_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ )
snake_case_ : Optional[Any] = bos_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ )
snake_case_ : str = eos_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ )
return scores
# with processor list
def run_processor_list(lowerCAmelCase__ :str , lowerCAmelCase__ :int , lowerCAmelCase__ :Tuple ):
            processor = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
snake_case_ : List[str] = processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ )
return scores
        jitted_run_no_processor_list = jax.jit(run_no_processor_list )
        jitted_run_processor_list = jax.jit(run_processor_list )
snake_case_ : Any = jitted_run_no_processor_list(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : Dict = jitted_run_processor_list(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# scores should be equal
self.assertTrue(jnp.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
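# ---------------------------------------------------------------------------
# Added usage sketch (not part of the original test suite). It assumes `jax`
# and `transformers[flax]` are installed and that the warpers are importable
# from the `transformers` top level, as in the classes exercised above.
if __name__ == "__main__":
    import jax.numpy as jnp
    from transformers import FlaxLogitsProcessorList, FlaxTemperatureLogitsWarper, FlaxTopKLogitsWarper

    dummy_input_ids = jnp.zeros((1, 4), dtype=jnp.int32)  # a dummy prompt of length 4
    uniform_scores = jnp.ones((1, 20)) / 20  # uniform logits over a 20-token vocabulary
    chain = FlaxLogitsProcessorList(
        [FlaxTemperatureLogitsWarper(temperature=0.7), FlaxTopKLogitsWarper(top_k=3)]
    )
    # Applying the list is equivalent to applying each warper in sequence,
    # which is exactly the property the tests above verify.
    warped = chain(dummy_input_ids, uniform_scores, cur_len=4)
    print(warped.shape)  # (1, 20); everything outside the top-3 is set to -inf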
| 709 |
'''simple docstring'''
def hexagonal_numbers(length: int) -> list[int]:
    """Return the first ``length`` hexagonal numbers, h_n = n * (2n - 1)."""
    if not isinstance(length, int) or length <= 0:
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]
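# Added sanity check: by the closed form h_n = n * (2n - 1), the first five
# hexagonal numbers (starting at n = 0) are 0, 1, 6, 15 and 28.
assert hexagonal_numbers(5) == [0, 1, 6, 15, 28]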
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 656 | 0 |
'''simple docstring'''
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros
def gen_gaussian_kernel(k_size: int, sigma: float):
    """Generate a k_size x k_size Gaussian kernel with standard deviation sigma."""
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g
def gaussian_filter(image, k_size: int, sigma: float):
    """Apply a Gaussian blur to ``image`` via im2col and a dot product."""
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1
    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1
    # turn the kernel into shape (k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)
    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)
    return dst
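# Added sanity sketch (numpy only, no image file required): a k_size of 3 on a
# 10x10 image yields an 8x8 result, because this is a "valid" convolution.
def _shape_check() -> None:
    sample = zeros((10, 10))
    assert gaussian_filter(sample, 3, sigma=1).shape == (8, 8)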
if __name__ == "__main__":
# read original image
    img = imread(R'''../image_data/lena.jpg''')
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)
    # show result images
    imshow('''gaussian filter with 3x3 mask''', gaussian3x3)
    imshow('''gaussian filter with 5x5 mask''', gaussian5x5)
waitKey()
| 710 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    """simple docstring"""
    if subparsers is not None:
        parser = subparsers.add_parser("test" )
    else:
        parser = argparse.ArgumentParser("Accelerate test command" )
    parser.add_argument(
        "--config_file" ,default=None ,help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ) ,)
    if subparsers is not None:
        parser.set_defaults(func=test_command )
    return parser
def test_command(args):
    """simple docstring"""
    script_name = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["test_utils", "scripts", "test_script.py"] )
    if args.config_file is None:
        test_args = script_name
    else:
        test_args = F'''--config_file={args.config_file} {script_name}'''
    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd ,env=os.environ.copy() )
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!" )
def main():
    """simple docstring"""
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args )
if __name__ == "__main__":
main()
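# Added note (not in the original module): once `accelerate` is installed, this
# file backs the CLI entry point `accelerate test [--config_file PATH]`, which
# launches test_utils/scripts/test_script.py through `accelerate-launch`.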
| 656 | 0 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase ):
"""simple docstring"""
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_rescale=True , rescale_factor=1 / 255 , do_pad=True , ):
'''simple docstring'''
snake_case_ : List[str] = size if size is not None else {"shortest_edge": 18, "longest_edge": 1_333}
snake_case_ : int = parent
snake_case_ : List[Any] = batch_size
snake_case_ : int = num_channels
snake_case_ : List[str] = min_resolution
snake_case_ : Dict = max_resolution
snake_case_ : Any = do_resize
snake_case_ : Optional[Any] = size
snake_case_ : Any = do_normalize
snake_case_ : Tuple = image_mean
snake_case_ : Tuple = image_std
snake_case_ : int = do_rescale
snake_case_ : str = rescale_factor
snake_case_ : int = do_pad
    def prepare_image_processor_dict( self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values( self , image_inputs , batched=False ):
'''simple docstring'''
if not batched:
snake_case_ : Dict = image_inputs[0]
if isinstance(UpperCAmelCase_ , Image.Image ):
snake_case_, snake_case_ : Optional[Any] = image.size
else:
snake_case_, snake_case_ : List[str] = image.shape[1], image.shape[2]
if w < h:
snake_case_ : Any = int(self.size["shortest_edge"] * h / w )
snake_case_ : str = self.size["shortest_edge"]
elif w > h:
snake_case_ : Any = self.size["shortest_edge"]
snake_case_ : int = int(self.size["shortest_edge"] * w / h )
else:
snake_case_ : Any = self.size["shortest_edge"]
snake_case_ : int = self.size["shortest_edge"]
else:
snake_case_ : List[Any] = []
for image in image_inputs:
snake_case_, snake_case_ : List[str] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
snake_case_ : List[Any] = max(UpperCAmelCase_ , key=lambda lowerCAmelCase__ : item[0] )[0]
snake_case_ : Optional[Any] = max(UpperCAmelCase_ , key=lambda lowerCAmelCase__ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class A_ (ImageProcessingSavingTestMixin , unittest.TestCase ):
"""simple docstring"""
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None
    def setUp( self ):
        '''simple docstring'''
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self )
@property
    def image_processor_dict( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _A ( self :Optional[int] ) -> int:
'''simple docstring'''
snake_case_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase_ , "image_mean" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , "image_std" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , "do_normalize" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , "do_resize" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , "size" ) )
def _A ( self :int ) -> List[str]:
'''simple docstring'''
snake_case_ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1_333} )
self.assertEqual(image_processor.do_pad , UpperCAmelCase_ )
snake_case_ : Dict = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCAmelCase_ )
self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} )
self.assertEqual(image_processor.do_pad , UpperCAmelCase_ )
def _A ( self :int ) -> int:
'''simple docstring'''
pass
def _A ( self :List[Any] ) -> Any:
'''simple docstring'''
snake_case_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , Image.Image )
# Test not batched input
snake_case_ : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case_, snake_case_ : List[Any] = self.image_processor_tester.get_expected_values(UpperCAmelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_, snake_case_ : Dict = self.image_processor_tester.get_expected_values(UpperCAmelCase_ , batched=UpperCAmelCase_ )
snake_case_ : Any = image_processing(UpperCAmelCase_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _A ( self :Dict ) -> int:
'''simple docstring'''
snake_case_ : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case_ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , numpify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , np.ndarray )
# Test not batched input
snake_case_ : str = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case_, snake_case_ : Any = self.image_processor_tester.get_expected_values(UpperCAmelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_ : Tuple = image_processing(UpperCAmelCase_ , return_tensors="pt" ).pixel_values
snake_case_, snake_case_ : Any = self.image_processor_tester.get_expected_values(UpperCAmelCase_ , batched=UpperCAmelCase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _A ( self :List[str] ) -> int:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , torchify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , torch.Tensor )
# Test not batched input
snake_case_ : int = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case_, snake_case_ : List[Any] = self.image_processor_tester.get_expected_values(UpperCAmelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_ : Tuple = image_processing(UpperCAmelCase_ , return_tensors="pt" ).pixel_values
snake_case_, snake_case_ : int = self.image_processor_tester.get_expected_values(UpperCAmelCase_ , batched=UpperCAmelCase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _A ( self :Optional[int] ) -> Dict:
'''simple docstring'''
snake_case_ : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
snake_case_ : List[Any] = json.loads(f.read() )
snake_case_ : Tuple = {"image_id": 39_769, "annotations": target}
# encode them
snake_case_ : Any = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50" )
snake_case_ : Optional[Any] = image_processing(images=UpperCAmelCase_ , annotations=UpperCAmelCase_ , return_tensors="pt" )
# verify pixel values
snake_case_ : List[Any] = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding["pixel_values"].shape , UpperCAmelCase_ )
snake_case_ : Tuple = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , UpperCAmelCase_ , atol=1E-4 ) )
# verify area
snake_case_ : Tuple = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , UpperCAmelCase_ ) )
# verify boxes
snake_case_ : int = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , UpperCAmelCase_ )
snake_case_ : Any = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , UpperCAmelCase_ , atol=1E-3 ) )
# verify image_id
snake_case_ : Union[str, Any] = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , UpperCAmelCase_ ) )
# verify is_crowd
snake_case_ : Dict = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , UpperCAmelCase_ ) )
# verify class_labels
snake_case_ : Any = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , UpperCAmelCase_ ) )
# verify orig_size
snake_case_ : int = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , UpperCAmelCase_ ) )
# verify size
snake_case_ : str = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , UpperCAmelCase_ ) )
@slow
def _A ( self :List[Any] ) -> List[str]:
'''simple docstring'''
snake_case_ : Any = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
snake_case_ : Optional[int] = json.loads(f.read() )
snake_case_ : Optional[Any] = {"file_name": "000000039769.png", "image_id": 39_769, "segments_info": target}
snake_case_ : List[str] = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
snake_case_ : Union[str, Any] = ConditionalDetrImageProcessor(format="coco_panoptic" )
snake_case_ : Tuple = image_processing(images=UpperCAmelCase_ , annotations=UpperCAmelCase_ , masks_path=UpperCAmelCase_ , return_tensors="pt" )
# verify pixel values
snake_case_ : Dict = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding["pixel_values"].shape , UpperCAmelCase_ )
snake_case_ : List[str] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , UpperCAmelCase_ , atol=1E-4 ) )
# verify area
snake_case_ : Dict = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , UpperCAmelCase_ ) )
# verify boxes
snake_case_ : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , UpperCAmelCase_ )
snake_case_ : Union[str, Any] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , UpperCAmelCase_ , atol=1E-3 ) )
# verify image_id
snake_case_ : Dict = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , UpperCAmelCase_ ) )
# verify is_crowd
snake_case_ : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , UpperCAmelCase_ ) )
# verify class_labels
snake_case_ : Union[str, Any] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , UpperCAmelCase_ ) )
# verify masks
snake_case_ : Any = 822_873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , UpperCAmelCase_ )
# verify orig_size
snake_case_ : Optional[Any] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , UpperCAmelCase_ ) )
# verify size
snake_case_ : Optional[int] = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , UpperCAmelCase_ ) )
| 711 |
'''simple docstring'''
from scipy.stats import spearmanr
import datasets
__lowerCamelCase : str = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
__lowerCamelCase : int = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
    p-value (`float`): p-value. **Note**: only returned when `return_pvalue=True` is passed.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
__lowerCamelCase : List[str] = R'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ (datasets.Metric ):
"""simple docstring"""
    def _info( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
} ) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"] , )
    def _compute( self , predictions , references , return_pvalue=False ):
        '''simple docstring'''
        results = spearmanr(references , predictions )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 656 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : int = logging.get_logger(__name__)
__lowerCamelCase : int = {
'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class A_ (PretrainedConfig ):
"""simple docstring"""
a__ = '''cvt'''
    def __init__( self , num_channels=3 , patch_sizes=[7, 3, 3] , patch_stride=[4, 2, 2] , patch_padding=[2, 1, 1] , embed_dim=[64, 192, 384] , num_heads=[1, 3, 6] , depth=[1, 2, 10] , mlp_ratio=[4.0, 4.0, 4.0] , attention_drop_rate=[0.0, 0.0, 0.0] , drop_rate=[0.0, 0.0, 0.0] , drop_path_rate=[0.0, 0.0, 0.1] , qkv_bias=[True, True, True] , cls_token=[False, False, True] , qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"] , kernel_qkv=[3, 3, 3] , padding_kv=[1, 1, 1] , stride_kv=[2, 2, 2] , padding_q=[1, 1, 1] , stride_q=[1, 1, 1] , initializer_range=0.02 , layer_norm_eps=1E-12 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
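if __name__ == "__main__":
    # Added sketch: the defaults above describe a three-stage CvT-13-like model
    # (the class is named `A_` in this file but plays the role of CvtConfig).
    config = A_()
    print(config.embed_dim)  # [64, 192, 384]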
| 712 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
__lowerCamelCase : Optional[Any] = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
__lowerCamelCase : str = 128022
__lowerCamelCase : List[Any] = 128028
@require_sentencepiece
class A_ (a_ , unittest.TestCase ):
"""simple docstring"""
a__ = MaMaaaTokenizer
a__ = False
a__ = False
a__ = True
    def setUp( self ):
'''simple docstring'''
super().setUp()
snake_case_ : int = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
snake_case_ : Any = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
        save_dir = Path(self.tmpdirname )
save_json(lowerCAmelCase__ , save_dir / VOCAB_FILES_NAMES["vocab_file"] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(lowerCAmelCase__ , save_dir / VOCAB_FILES_NAMES["spm_file"] )
        tokenizer = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
    def get_tokenizer( self , **kwargs ):
        '''simple docstring'''
        return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
'''simple docstring'''
return (
"This is a test",
"This is a test",
)
def _A ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : str = "</s>"
snake_case_ : Union[str, Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__ ) , lowerCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__ ) , lowerCAmelCase__ )
def _A ( self :Union[str, Any] ) -> List[str]:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.get_tokenizer()
snake_case_ : Any = list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "</s>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "<s>" )
self.assertEqual(len(lowerCAmelCase__ ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip("Skip this test while all models are still to be uploaded." )
def _A ( self :List[Any] ) -> Union[str, Any]:
'''simple docstring'''
pass
def _A ( self :Optional[int] ) -> int:
'''simple docstring'''
snake_case_ : int = self.get_tokenizer()
snake_case_ : List[str] = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowerCAmelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [2, 3, 4, 5, 6] , )
snake_case_ : Any = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(lowerCAmelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
snake_case_ : Any = tokenizer.convert_tokens_to_string(lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , "This is a test" )
@slow
def _A ( self :Any ) -> List[Any]:
'''simple docstring'''
snake_case_ : int = {"input_ids": [[128_022, 110_108, 397, 11, 38_272, 2_247, 124_811, 285, 18_105, 1_586, 207, 7, 39_534, 4_428, 397, 1_019, 18_105, 1_586, 207, 7, 41_337, 16_786, 241, 7, 20_214, 17, 125_690, 10_398, 7, 44_378, 58_069, 68_342, 7_798, 7_343, 11, 299, 33_310, 4, 158, 37_350, 94_077, 4_569, 299, 33_310, 90, 4, 52_840, 290, 4, 31_270, 112, 299, 682, 4, 52_840, 39_953, 14_079, 193, 52_519, 90_894, 17_894, 120_697, 11, 40_445, 551, 17, 1_019, 52_519, 90_894, 17_756, 963, 11, 40_445, 480, 17, 9_792, 1_120, 5_173, 1_393, 6_240, 16_786, 241, 120_996, 28, 1_245, 1_393, 118_240, 11_123, 1_019, 93_612, 2_691, 10_618, 98_058, 120_409, 1_928, 279, 4, 40_683, 367, 178, 207, 1_019, 103, 103_121, 506, 65_296, 5, 2], [128_022, 21_217, 367, 117, 125_450, 128, 719, 7, 7_308, 40, 93_612, 12_669, 1_116, 16_704, 71, 17_785, 3_699, 15_592, 35, 144, 9_584, 241, 11_943, 713, 950, 799, 2_247, 88_427, 150, 149, 118_813, 120_706, 1_019, 106_906, 81_518, 28, 1_224, 22_799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128_022, 1_658, 123_311, 5_155, 5_578, 4_722, 279, 14_947, 2_366, 1_120, 1_197, 14, 1_348, 9_232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__ , model_name="facebook/m2m100_418M" , revision="c168bae485c864188cf9aa0e4108b0b6934dc91e" , )
@require_torch
@require_sentencepiece
@require_tokenizers
class A_ (unittest.TestCase ):
"""simple docstring"""
a__ = '''facebook/m2m100_418M'''
a__ = [
'''In my opinion, there are two levels of response from the French government.''',
'''NSA Affair Emphasizes Complete Lack of Debate on Intelligence''',
]
a__ = [
'''Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.''',
'''L\'affaire NSA souligne l\'absence totale de débat sur le renseignement''',
]
# fmt: off
a__ = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
@classmethod
    def setUpClass( cls ):
        '''simple docstring'''
        cls.tokenizer: MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang="en" , tgt_lang="fr" )
        cls.pad_token_id = 1
        return cls
def _A ( self :Tuple ) -> Union[str, Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.get_lang_id("ar" ) , 128_006 )
self.assertEqual(self.tokenizer.get_lang_id("en" ) , 128_022 )
self.assertEqual(self.tokenizer.get_lang_id("ro" ) , 128_076 )
self.assertEqual(self.tokenizer.get_lang_id("mr" ) , 128_063 )
def _A ( self :Optional[int] ) -> List[str]:
'''simple docstring'''
snake_case_ : Dict = self.tokenizer.get_vocab()
self.assertEqual(len(lowerCAmelCase__ ) , self.tokenizer.vocab_size )
self.assertEqual(vocab["<unk>"] , 3 )
self.assertIn(self.tokenizer.get_lang_token("en" ) , lowerCAmelCase__ )
def _A ( self :Any ) -> Dict:
'''simple docstring'''
snake_case_ : List[str] = "en"
snake_case_ : Dict = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase__ )
def _A ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
self.assertIn(lowerCAmelCase__ , self.tokenizer.all_special_ids )
# fmt: off
snake_case_ : Dict = [FR_CODE, 5_364, 82, 8_642, 4, 294, 47, 8, 14_028, 136, 3_286, 9_706, 6, 90_797, 6, 144_012, 162, 88_128, 30_061, 5, 2]
# fmt: on
snake_case_ : List[str] = self.tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
snake_case_ : str = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertNotIn(self.tokenizer.eos_token , lowerCAmelCase__ )
def _A ( self :Tuple ) -> Tuple:
'''simple docstring'''
snake_case_ : Union[str, Any] = tempfile.mkdtemp()
snake_case_ : int = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(lowerCAmelCase__ )
snake_case_ : List[str] = MaMaaaTokenizer.from_pretrained(lowerCAmelCase__ )
self.assertDictEqual(new_tok.lang_token_to_id , lowerCAmelCase__ )
@require_torch
def _A ( self :Optional[Any] ) -> str:
'''simple docstring'''
snake_case_ : Union[str, Any] = "en"
snake_case_ : Tuple = "fr"
snake_case_ : Optional[int] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase__ , return_tensors="pt" )
snake_case_ : Dict = shift_tokens_right(
batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
for k in batch:
snake_case_ : str = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def _A ( self :Optional[Any] ) -> Tuple:
'''simple docstring'''
snake_case_ : List[str] = "mr"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
snake_case_ : int = "zh"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
def _A ( self :str ) -> int:
'''simple docstring'''
snake_case_ : Dict = "mr"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
snake_case_ : Tuple = "zh"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def _A ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Optional[int] = self.tokenizer._build_translation_inputs("A test" , return_tensors="pt" , src_lang="en" , tgt_lang="ar" )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
# en_XX, A, test, EOS
"input_ids": [[128_022, 58, 4_183, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 128_006,
} , )
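# ---------------------------------------------------------------------------
# Added usage sketch (assumes `transformers` + `sentencepiece` and Hub access;
# `MaMaaaTokenizer` is the name this module imports for the M2M100 tokenizer):
#   tok = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
#   print(tok("Hello world").input_ids)  # starts with the "en" language-code id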
| 656 | 0 |
'''simple docstring'''
def multiplication_table(number: int, number_of_terms: int) -> str:
    """simple docstring"""
    return "\n".join(
        F'''{number} * {i} = {number * i}''' for i in range(1 ,number_of_terms + 1 ) )
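# Example: multiplication_table(5, 3) returns the three lines
# 5 * 1 = 5
# 5 * 2 = 10
# 5 * 3 = 15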
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
| 713 |
'''simple docstring'''
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
__lowerCamelCase : str = '''.'''
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
__lowerCamelCase : Tuple = [
'''Assert''',
'''AssignVariableOp''',
'''EmptyTensorList''',
'''MergeV2Checkpoints''',
'''ReadVariableOp''',
'''ResourceGather''',
'''RestoreV2''',
'''SaveV2''',
'''ShardedFilename''',
'''StatefulPartitionedCall''',
'''StaticRegexFullMatch''',
'''VarHandleOp''',
]
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> List[str]:
"""simple docstring"""
snake_case_ : Tuple = SavedModel()
snake_case_ : Dict = []
with open(os.path.join(__magic_name__ ,"utils" ,"tf_ops" ,"onnx.json" ) ) as f:
snake_case_ : Dict = json.load(__magic_name__ )["opsets"]
for i in range(1 ,opset + 1 ):
onnx_ops.extend(onnx_opsets[str(__magic_name__ )] )
with open(__magic_name__ ,"rb" ) as f:
saved_model.ParseFromString(f.read() )
snake_case_ : Tuple = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node )
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def )
# Convert to list, sorted if you want
snake_case_ : str = sorted(__magic_name__ )
snake_case_ : Optional[int] = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(__magic_name__ )
if strict and len(__magic_name__ ) > 0:
raise Exception(F'''Found the following incompatible ops for the opset {opset}:\n''' + incompatible_ops )
elif len(__magic_name__ ) > 0:
print(F'''Found the following incompatible ops for the opset {opset}:''' )
print(*__magic_name__ ,sep="\n" )
else:
print(F'''The saved model {saved_model_path} can properly be converted with ONNX.''' )
if __name__ == "__main__":
__lowerCamelCase : Any = argparse.ArgumentParser()
parser.add_argument('''--saved_model_path''', help='''Path of the saved model to check (the .pb file).''')
parser.add_argument(
'''--opset''', default=12, type=int, help='''The ONNX opset against which the model has to be tested.'''
)
parser.add_argument(
'''--framework''', choices=['''onnx'''], default='''onnx''', help='''Frameworks against which to test the saved model.'''
)
parser.add_argument(
'''--strict''', action='''store_true''', help='''Whether make the checking strict (raise errors) or not (raise warnings)'''
)
__lowerCamelCase : Dict = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
| 656 | 0 |
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
    layoutlmv2,
    layoutlmv3,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
    mobilenet_v1,
    mobilenet_v2,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 714 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
__lowerCamelCase : Optional[Any] = datasets.utils.logging.get_logger(__name__)
__lowerCamelCase : List[str] = ['''names''', '''prefix''']
__lowerCamelCase : int = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols''']
__lowerCamelCase : str = ['''encoding_errors''', '''on_bad_lines''']
__lowerCamelCase : Optional[Any] = ['''date_format''']
@dataclass
class CsvConfig(datasets.BuilderConfig ):
"""simple docstring"""
a__ = ","
a__ = None
a__ = "infer"
a__ = None
a__ = None
a__ = None
a__ = None
a__ = None
a__ = True
a__ = None
a__ = None
a__ = None
a__ = None
a__ = False
a__ = None
a__ = None
a__ = None
a__ = True
a__ = True
a__ = False
a__ = True
a__ = None
a__ = "."
a__ = None
a__ = '"'
a__ = 0
a__ = None
a__ = None
a__ = None
a__ = None
a__ = True
a__ = True
a__ = 0
a__ = True
a__ = False
a__ = None
a__ = 10000
a__ = None
a__ = "strict"
a__ = "error"
a__ = None
    def __post_init__( self ):
        '''simple docstring'''
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names
@property
    def pd_read_csv_kwargs( self ):
        '''simple docstring'''
        pd_read_csv_kwargs = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , pd_read_csv_parameter ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class A_ (datasets.ArrowBasedBuilder ):
"""simple docstring"""
    BUILDER_CONFIG_CLASS = CsvConfig
    def _info( self ):
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
    def _split_generators( self , dl_manager ):
        '''simple docstring'''
        if not self.config.data_files:
            raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files , (str, list, tuple) ):
            files = data_files
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={"files": files} ) )
        return splits
    def _cast_table( self , pa_table: pa.Table ) -> pa.Table:
        '''simple docstring'''
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature ) for feature in self.config.features.values() ):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=schema )
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table , schema )
        return pa_table
    def _generate_tables( self , files ):
        '''simple docstring'''
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature ) else object
                for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
            csv_file_reader = pd.read_csv(file , iterator=True , dtype=dtype , **self.config.pd_read_csv_kwargs )
            try:
                for batch_idx, df in enumerate(csv_file_reader ):
                    pa_table = pa.Table.from_pandas(df )
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table )
            except ValueError as e:
                logger.error(F'''Failed to read file \'{file}\' with error {type(e )}: {e}''' )
                raise
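# ---------------------------------------------------------------------------
# Added usage sketch (assumes the `datasets` library; loading CSV files is
# routed through this builder, and extra keyword arguments are forwarded to
# `pandas.read_csv` via the `pd_read_csv_kwargs` property above):
#   import datasets
#   ds = datasets.load_dataset("csv", data_files={"train": "train.csv"}, sep=";")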
| 656 | 0 |
def perfect(number: int) -> bool:
"""simple docstring"""
return sum(i for i in range(1 ,number // 2 + 1 ) if number % i == 0 ) == number
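# Added note: a perfect number equals the sum of its proper divisors, e.g.
# perfect(28) is True because 1 + 2 + 4 + 7 + 14 == 28.
assert perfect(6) and perfect(28)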
if __name__ == "__main__":
print('''Program to check whether a number is a Perfect number or not...''')
__lowerCamelCase : Dict = int(input('''Enter number: ''').strip())
print(f'''{number} is {"" if perfect(number) else "not "}a Perfect Number.''')
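    # sanity check: the first few known perfect numbers should all pass
    assert all(perfect(n) for n in (6, 28, 496, 8128))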
| 715 |
'''simple docstring'''
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ (a_ , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp( self ) -> None:
        '''simple docstring'''
        super().setUp()
        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + "\n" )
    def get_tokenizer( self , **kwargs ) -> MgpstrTokenizer:
        '''simple docstring'''
        return MgpstrTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        '''simple docstring'''
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text
@unittest.skip("MGP-STR always lower cases letters." )
    def test_added_tokens_do_lower_case( self ) -> None:
'''simple docstring'''
pass
    def test_add_special_tokens( self ) -> None:
        '''simple docstring'''
        tokenizers = self.get_tokenizers(do_lower_case=False )
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                special_token = "[SPECIAL_TOKEN]"
                tokenizer.add_special_tokens({"cls_token": special_token} )
                encoded_special_token = tokenizer.encode([special_token] , add_special_tokens=False )
                self.assertEqual(len(encoded_special_token ) , 1 )
                decoded = tokenizer.decode(encoded_special_token , skip_special_tokens=True )
                self.assertTrue(special_token not in decoded )
    def test_internal_consistency( self ) -> None:
        '''simple docstring'''
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                input_text, output_text = self.get_input_output_texts(tokenizer )
                tokens = tokenizer.tokenize(input_text )
                ids = tokenizer.convert_tokens_to_ids(tokens )
                ids_a = tokenizer.encode(input_text , add_special_tokens=False )
                self.assertListEqual(ids , ids_a )
                tokens_a = tokenizer.convert_ids_to_tokens(ids )
                self.assertNotEqual(len(tokens_a ) , 0 )
                text_a = tokenizer.decode(ids )
                self.assertIsInstance(text_a , str )
                self.assertEqual(text_a.replace(" " , "" ) , output_text )
@unittest.skip("MGP-STR tokenizer only handles one sequence." )
    def test_maximum_encoding_length_pair_input( self ) -> None:
'''simple docstring'''
pass
@unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" )
    def test_pretokenized_inputs( self ) -> None:
'''simple docstring'''
pass
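# Minimal usage sketch of the tokenizer under test (the checkpoint id below is
# illustrative and not part of this test):
#   tokenizer = MgpstrTokenizer.from_pretrained("alibaba-damo/mgp-str-base")
#   tokenizer("tester")["input_ids"]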
| 656 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ (unittest.TestCase ):
"""simple docstring"""
@property
    def dummy_uncond_unet( self ):
'''simple docstring'''
torch.manual_seed(0 )
        model = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
    def test_inference( self ) -> None:
        '''simple docstring'''
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()
        pndm = PNDMPipeline(unet=unet , scheduler=scheduler )
        pndm.to(torch_device )
        pndm.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = pndm(generator=generator , num_inference_steps=20 , output_type="numpy" ).images
        generator = torch.manual_seed(0 )
        image_from_tuple = pndm(generator=generator , num_inference_steps=20 , output_type="numpy" , return_dict=False )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class A_ (unittest.TestCase ):
"""simple docstring"""
    def test_inference_cifar10( self ) -> None:
        '''simple docstring'''
        model_id = "google/ddpm-cifar10-32"
        unet = UNetaDModel.from_pretrained(model_id )
        scheduler = PNDMScheduler()
        pndm = PNDMPipeline(unet=unet , scheduler=scheduler )
        pndm.to(torch_device )
        pndm.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = pndm(generator=generator , output_type="numpy" ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
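# Minimal inference sketch mirroring the slow test above (checkpoint id taken
# from the test itself):
#   unet = UNetaDModel.from_pretrained("google/ddpm-cifar10-32")
#   pipe = PNDMPipeline(unet=unet, scheduler=PNDMScheduler())
#   image = pipe(num_inference_steps=20, output_type="numpy").images[0]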
| 716 |
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean( input_a , input_b ) -> float:
    """simple docstring"""
    return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(input_a , input_b ) ) )
def similarity_search( dataset , value_array ) -> list[list[list[float] | float]]:
    """simple docstring"""
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            F'''dataset : {dataset.ndim}, value_array : {value_array.ndim}'''
        )
        raise ValueError(msg )
    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                F'''dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}'''
            )
            raise ValueError(msg )
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape" )
    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            F'''dataset : {dataset.dtype}, value_array : {value_array.dtype}'''
        )
        raise TypeError(msg )
    answer = []
    for value in value_array:
        dist = euclidean(value , dataset[0] )
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value , dataset_value )
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist] )
    return answer
def cosine_similarity( input_a , input_b ) -> float:
    """simple docstring"""
    return np.dot(input_a , input_b ) / (norm(input_a ) * norm(input_b ))
if __name__ == "__main__":
import doctest
doctest.testmod()
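    # quick demo: the closest row of the dataset to [0, 1] is [0, 0] at distance 1.0
    demo_dataset = np.array([[0, 0], [1, 1], [2, 2]])
    demo_values = np.array([[0, 1]])
    print(similarity_search(demo_dataset, demo_values))  # [[[0, 0], 1.0]]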
| 656 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
from collections.abc import Callable
def line_length( fnc: Callable[[int | float], int | float] , x_start: int | float , x_end: int | float , steps: int = 100 , ) -> float:
    """simple docstring"""
    xa = x_start
    fxa = fnc(x_start )
    length = 0.0
    for _ in range(steps ):
        # Approximates curve as a sequence of linear lines and sums their length
        xb = (x_end - x_start) / steps + xa
        fxb = fnc(xb )
        length += math.hypot(xb - xa , fxb - fxa )
        # Increment step
        xa = xb
        fxa = fxb
    return length
if __name__ == "__main__":
    def f( x: float ) -> float:
        """simple docstring"""
        return math.sin(10 * x )
print('''f(x) = sin(10 * x)''')
print('''The length of the curve from x = -10 to x = 10 is:''')
    i = 10
while i <= 100000:
print(f'''With {i} steps: {line_length(f, -10, 10, i)}''')
i *= 10
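    # sanity check: a straight segment y = x over [0, 1] has exact length sqrt(2)
    print(f'''Straight-line check: {line_length(lambda x: x, 0, 1, 10)}''')  # ~1.41421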
| 717 |
'''simple docstring'''
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path( pred_path , tgt_path , save_path=None , **kwargs ) -> dict:
    """simple docstring"""
    pred_lns = [x.strip() for x in open(pred_path ).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path ).readlines()][: len(pred_lns )]
    metrics = calculate_rouge(pred_lns , tgt_lns , **kwargs )
    if save_path is not None:
        save_json(metrics , save_path , indent=None )
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
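# Example invocation via python-fire (file names are illustrative):
#   python rouge_cli.py pred_summaries.txt ref_summaries.txt --save_path rouge.json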
| 656 | 0 |
'''simple docstring'''
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def _preprocess_image( image ) -> torch.Tensor:
    """simple docstring"""
    warnings.warn(
        "The preprocess method is deprecated and will be removed in a future version. Please"
        " use VaeImageProcessor.preprocess instead" , FutureWarning , )
    if isinstance(image , torch.Tensor ):
        return image
    elif isinstance(image , PIL.Image.Image ):
        image = [image]
    if isinstance(image[0] , PIL.Image.Image ):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8
        image = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] ) )[None, :] for i in image]
        image = np.concatenate(image , axis=0 )
        image = np.array(image ).astype(np.float32 ) / 255.0
        image = image.transpose(0 , 3 , 1 , 2 )
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image )
    elif isinstance(image[0] , torch.Tensor ):
        image = torch.cat(image , dim=0 )
    return image
def _preprocess_mask( mask ) -> torch.Tensor:
    """simple docstring"""
    if isinstance(mask , torch.Tensor ):
        return mask
    elif isinstance(mask , PIL.Image.Image ):
        mask = [mask]
    if isinstance(mask[0] , PIL.Image.Image ):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert("L" ).resize((w, h) , resample=PIL_INTERPOLATION["nearest"] ) )[None, :] for m in mask]
        mask = np.concatenate(mask , axis=0 )
        mask = mask.astype(np.float32 ) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask )
    elif isinstance(mask[0] , torch.Tensor ):
        mask = torch.cat(mask , dim=0 )
    return mask
class A_ (DiffusionPipeline ):
    """simple docstring"""
    unet: UNetaDModel
    scheduler: RePaintScheduler
    def __init__( self , unet: UNetaDModel , scheduler: RePaintScheduler ) -> None:
        '''simple docstring'''
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
@torch.no_grad()
    def __call__( self , image: Union[torch.Tensor, PIL.Image.Image] , mask_image: Union[torch.Tensor, PIL.Image.Image] , num_inference_steps: int = 250 , eta: float = 0.0 , jump_length: int = 10 , jump_n_sample: int = 10 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , ) -> Union[ImagePipelineOutput, Tuple]:
        '''simple docstring'''
        original_image = image
        original_image = _preprocess_image(original_image )
        original_image = original_image.to(device=self.device , dtype=self.unet.dtype )
        mask_image = _preprocess_mask(mask_image )
        mask_image = mask_image.to(device=self.device , dtype=self.unet.dtype )
        batch_size = original_image.shape[0]
        # sample gaussian noise to begin the loop
        if isinstance(generator , list ) and len(generator ) != batch_size:
            raise ValueError(
                F'''You have passed a list of generators of length {len(generator )}, but requested an effective batch'''
                F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
        image_shape = original_image.shape
        image = randn_tensor(image_shape , generator=generator , device=self.device , dtype=self.unet.dtype )
        # set step values
        self.scheduler.set_timesteps(num_inference_steps , jump_length , jump_n_sample , self.device )
        self.scheduler.eta = eta
        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator , list ) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image , t ).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output , t , image , original_image , mask_image , generator ).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image , t_last , generator )
            t_last = t
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
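# Minimal usage sketch (the checkpoint id is illustrative; RePaint expects an
# unconditional DDPM-style UNet):
#   unet = UNetaDModel.from_pretrained("google/ddpm-ema-celebahq-256")
#   pipe = A_(unet=unet, scheduler=RePaintScheduler())
#   out = pipe(image=init_image, mask_image=mask_image, num_inference_steps=250).images[0]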
| 718 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', f'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', f'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', f'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', f'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.weight''', f'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', f'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', f'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', f'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.weight''', f'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', f'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', f'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', f'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', f'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', f'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.bias''', f'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', f'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', f'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', f'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.bias''', f'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', f'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''),
('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''),
('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''),
('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''),
('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''),
('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''),
('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''),
('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''),
('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''),
('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''),
]
)
def rename_key( state_dict , old , new ) -> None:
    """simple docstring"""
    val = state_dict.pop(old )
    state_dict[new] = val
def rename_backbone_keys( state_dict ) -> OrderedDict:
    """simple docstring"""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body" , "backbone.conv_encoder.model" )
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v( state_dict , is_panoptic=False ) -> None:
    """simple docstring"""
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
        in_proj_bias = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'''encoder.layers.{i}.self_attn.q_proj.weight'''] = in_proj_weight[:256, :]
        state_dict[F'''encoder.layers.{i}.self_attn.q_proj.bias'''] = in_proj_bias[:256]
        state_dict[F'''encoder.layers.{i}.self_attn.k_proj.weight'''] = in_proj_weight[256:512, :]
        state_dict[F'''encoder.layers.{i}.self_attn.k_proj.bias'''] = in_proj_bias[256:512]
        state_dict[F'''encoder.layers.{i}.self_attn.v_proj.weight'''] = in_proj_weight[-256:, :]
        state_dict[F'''encoder.layers.{i}.self_attn.v_proj.bias'''] = in_proj_bias[-256:]
def prepare_img( ) -> Image.Image:
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint( model_name , pytorch_dump_folder_path ) -> None:
    """simple docstring"""
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format )
    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img , return_tensors="pt" )
    pixel_values = encoding["pixel_values"]
    logger.info(F'''Converting model {model_name}...''' )
    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR" , model_name , pretrained=True ).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict , src , dest )
    state_dict = rename_backbone_keys(state_dict )
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict , is_panoptic=is_panoptic )
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr" )
                and not key.startswith("class_labels_classifier" )
                and not key.startswith("bbox_predictor" )
            ):
                val = state_dict.pop(key )
                # keys of the wrapped panoptic model move under conditional_detr.model
                state_dict["conditional_detr.model" + key[len("conditional_detr" ):]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key )
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
                continue
            else:
                val = state_dict.pop(key )
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
                val = state_dict.pop(key )
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config ) if is_panoptic else ConditionalDetrForObjectDetection(config )
    model.load_state_dict(state_dict )
    model.eval()
    model.push_to_hub(repo_id=model_name , organization="DepuMeng" , commit_message="Add model" )
    # verify our conversion
    original_outputs = conditional_detr(pixel_values )
    outputs = model(pixel_values )
    assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-4 )
    assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-4 )
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 )
    # Save model and image processor
    logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''conditional_detr_resnet50''',
type=str,
help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
    args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
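# Example invocation (script and output path are illustrative):
#   python convert_conditional_detr_checkpoint.py \
#       --model_name conditional_detr_resnet50 --pytorch_dump_folder_path ./converted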
| 656 | 0 |
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("socket.socket" )
@patch("builtins.open" )
def test_send_file_running_as_expected( file , sock ) -> None:
    """simple docstring"""
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None] )
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f )
    # ===== invoke =====
    send_file(filename="mytext.txt" , testing=True )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
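# For reference, a minimal sketch of the kind of server loop this test mocks out.
# The real implementation lives in file_transfer.send_file; the port and buffer
# size below are assumptions made for illustration only.
def _send_file_sketch(filename: str = "mytext.txt", testing: bool = False) -> None:
    import socket

    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.bind(("localhost", 12312))  # arbitrary port for the sketch
    sock.listen(5)
    while True:
        conn, _ = sock.accept()
        conn.recv(1024)  # wait for the client's request
        with open(filename, "rb") as in_file:
            data = in_file.read(1024)
            while data:
                conn.send(data)
                data = in_file.read(1024)
        conn.close()
        if testing:  # the unit test above exercises a single iteration
            break
    sock.shutdown(1)
    sock.close()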
| 719 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class A_ (unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ) -> None:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_image( self ):
        '''simple docstring'''
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(torch_device )
        return image
@property
    def dummy_cond_unet( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        model = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
        return model
@property
    def dummy_vae( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        model = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
        return model
@property
    def dummy_text_encoder( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        config = RobertaSeriesConfig(
            hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5006 , )
        return RobertaSeriesModelWithTransformation(config )
@property
    def dummy_extractor( self ):
        '''simple docstring'''
        def extract(*args , **kwargs ):
            class Out:
                """simple docstring"""
                def __init__( self ) -> None:
                    '''simple docstring'''
                    self.pixel_values = torch.ones([0] )
                def to( self , device ):
                    '''simple docstring'''
                    self.pixel_values.to(device )
                    return self
            return Out()
        return extract
    def test_stable_diffusion_img2img_default_case( self ) -> None:
        '''simple docstring'''
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True )
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
        tokenizer.model_max_length = 77
        init_image = self.dummy_image.to(device )
        init_image = init_image / 2 + 0.5
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImgaImgPipeline(
            unet=unet , scheduler=scheduler , vae=vae , text_encoder=bert , tokenizer=tokenizer , safety_checker=None , feature_extractor=self.dummy_extractor , )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=False )
        alt_pipe = alt_pipe.to(device )
        alt_pipe.set_progress_bar_config(disable=None )
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device ).manual_seed(0 )
        output = alt_pipe(
            [prompt] , generator=generator , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=init_image , )
        image = output.images
        generator = torch.Generator(device=device ).manual_seed(0 )
        image_from_tuple = alt_pipe(
            [prompt] , generator=generator , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=init_image , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
    def test_stable_diffusion_img2img_fp16( self ) -> None:
        '''simple docstring'''
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True )
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
        tokenizer.model_max_length = 77
        init_image = self.dummy_image.to(torch_device )
        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImgaImgPipeline(
            unet=unet , scheduler=scheduler , vae=vae , text_encoder=bert , tokenizer=tokenizer , safety_checker=None , feature_extractor=self.dummy_extractor , )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=False )
        alt_pipe = alt_pipe.to(torch_device )
        alt_pipe.set_progress_bar_config(disable=None )
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0 )
        image = alt_pipe(
            [prompt] , generator=generator , num_inference_steps=2 , output_type="np" , image=init_image , ).images
        assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
    def test_stable_diffusion_img2img_pipeline_multiple_of_8( self ) -> None:
        '''simple docstring'''
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg" )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504) )
        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImgaImgPipeline.from_pretrained(
            model_id , safety_checker=None , )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=init_image , strength=0.75 , guidance_scale=7.5 , generator=generator , output_type="np" , )
        image = output.images[0]
        image_slice = image[255:258, 383:386, -1]
        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class A_ (unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ) -> None:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_stable_diffusion_img2img_pipeline_default( self ) -> None:
        '''simple docstring'''
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg" )
        init_image = init_image.resize((768, 512) )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" )
        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImgaImgPipeline.from_pretrained(
            model_id , safety_checker=None , )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=init_image , strength=0.75 , guidance_scale=7.5 , generator=generator , output_type="np" , )
        image = output.images[0]
        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image ).max() < 1E-2
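# Minimal usage sketch matching the slow tests above:
#   pipe = AltDiffusionImgaImgPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
#   out = pipe(prompt="A fantasy landscape", image=init_image, strength=0.75, guidance_scale=7.5)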
| 656 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
},
'tokenizer_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/rembert': 256,
}
SPIECE_UNDERLINE = '▁'
class A_ (PreTrainedTokenizerFast ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , remove_space=True , keep_accents=False , bos_token="[CLS]" , eos_token="[SEP]" , unk_token="<unk>" , sep_token="[SEP]" , pad_token="<pad>" , cls_token="[CLS]" , mask_token="[MASK]" , **kwargs , ) -> None:
        '''simple docstring'''
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model." )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory ) )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
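# Minimal usage sketch (checkpoint id taken from the pretrained map above):
#   tokenizer = A_.from_pretrained("google/rembert")
#   tokenizer.build_inputs_with_special_tokens([5, 6])  # -> [cls_id, 5, 6, sep_id]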
| 720 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''}
@is_pipeline_test
class A_ (unittest.TestCase ):
"""simple docstring"""
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    def get_test_pipeline( self , model , tokenizer , processor ):
        '''simple docstring'''
        classifier = ZeroShotClassificationPipeline(
            model=model , tokenizer=tokenizer , candidate_labels=["polics", "health"] )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def run_pipeline_test( self , classifier , _ ):
        '''simple docstring'''
        outputs = classifier("Who are you voting for in 2020?" , candidate_labels="politics" )
        self.assertEqual(outputs , {"sequence": ANY(str ), "labels": [ANY(str )], "scores": [ANY(float )]} )
        # No kwarg
        outputs = classifier("Who are you voting for in 2020?" , ["politics"] )
        self.assertEqual(outputs , {"sequence": ANY(str ), "labels": [ANY(str )], "scores": [ANY(float )]} )
        outputs = classifier("Who are you voting for in 2020?" , candidate_labels=["politics"] )
        self.assertEqual(outputs , {"sequence": ANY(str ), "labels": [ANY(str )], "scores": [ANY(float )]} )
        outputs = classifier("Who are you voting for in 2020?" , candidate_labels="politics, public health" )
        self.assertEqual(
            outputs , {"sequence": ANY(str ), "labels": [ANY(str ), ANY(str )], "scores": [ANY(float ), ANY(float )]} )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ) , 1.0 )
        outputs = classifier("Who are you voting for in 2020?" , candidate_labels=["politics", "public health"] )
        self.assertEqual(
            outputs , {"sequence": ANY(str ), "labels": [ANY(str ), ANY(str )], "scores": [ANY(float ), ANY(float )]} )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ) , 1.0 )
        outputs = classifier(
            "Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template="This text is about {}" )
        self.assertEqual(outputs , {"sequence": ANY(str ), "labels": [ANY(str )], "scores": [ANY(float )]} )
        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(["I am happy"] , ["positive", "negative"] )
        self.assertEqual(
            outputs , [
                {"sequence": ANY(str ), "labels": [ANY(str ), ANY(str )], "scores": [ANY(float ), ANY(float )]}
                for i in range(1 )
            ] , )
        outputs = classifier(["I am happy", "I am sad"] , ["positive", "negative"] )
        self.assertEqual(
            outputs , [
                {"sequence": ANY(str ), "labels": [ANY(str ), ANY(str )], "scores": [ANY(float ), ANY(float )]}
                for i in range(2 )
            ] , )
        with self.assertRaises(ValueError ):
            classifier("" , candidate_labels="politics" )
        with self.assertRaises(TypeError ):
            classifier(None , candidate_labels="politics" )
        with self.assertRaises(ValueError ):
            classifier("Who are you voting for in 2020?" , candidate_labels="" )
        with self.assertRaises(TypeError ):
            classifier("Who are you voting for in 2020?" , candidate_labels=None )
        with self.assertRaises(ValueError ):
            classifier(
                "Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template="Not formatting template" , )
        with self.assertRaises(AttributeError ):
            classifier(
                "Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template=None , )
        self.run_entailment_id(classifier )
    def run_entailment_id( self , zero_shot_classifier: Pipeline ):
        '''simple docstring'''
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id
        config.label2id = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id , -1 )
        config.label2id = {"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id , 0 )
        config.label2id = {"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id , 0 )
        config.label2id = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id , 2 )
        config.label2id = original_label2id
        self.assertEqual(original_entailment , zero_shot_classifier.entailment_id )
@require_torch
    def test_truncation( self ) -> None:
'''simple docstring'''
        zero_shot_classifier = pipeline(
"zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
"Who are you voting for in 2020?" * 100 , candidate_labels=["politics", "public health", "science"] )
@require_torch
    def test_small_model_pt( self ) -> None:
'''simple docstring'''
        zero_shot_classifier = pipeline(
"zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , )
        outputs = zero_shot_classifier(
"Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
self.assertEqual(
            nested_simplify(outputs ) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.3_3_3, 0.3_3_3, 0.3_3_3],
} , )
@require_tf
    def test_small_model_tf( self ) -> None:
'''simple docstring'''
        zero_shot_classifier = pipeline(
"zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="tf" , )
        outputs = zero_shot_classifier(
"Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
self.assertEqual(
            nested_simplify(outputs ) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.3_3_3, 0.3_3_3, 0.3_3_3],
} , )
@slow
@require_torch
    def test_large_model_pt( self ) -> None:
'''simple docstring'''
snake_case_ : int = pipeline("zero-shot-classification" , model="roberta-large-mnli" , framework="pt" )
        outputs = zero_shot_classifier(
"Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
self.assertEqual(
            nested_simplify(outputs ) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.9_7_6, 0.0_1_5, 0.0_0_9],
} , )
        outputs = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=lowerCAmelCase__ , )
self.assertEqual(
            nested_simplify(outputs ) , {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
@slow
@require_tf
    def test_large_model_tf( self ) -> None:
'''simple docstring'''
snake_case_ : int = pipeline("zero-shot-classification" , model="roberta-large-mnli" , framework="tf" )
        outputs = zero_shot_classifier(
"Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
self.assertEqual(
            nested_simplify(outputs ) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.9_7_6, 0.0_1_5, 0.0_0_9],
} , )
        outputs = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=lowerCAmelCase__ , )
self.assertEqual(
            nested_simplify(outputs ) , {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
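# --- Illustrative usage sketch (hedged, not part of the test suite) ---
# A minimal example of the zero-shot classification API exercised by the tests
# above. `facebook/bart-large-mnli` is a common checkpoint choice for this
# pipeline (any NLI-style model works); network access is needed on first run.
#
# from transformers import pipeline
#
# classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
# result = classifier(
#     "Who are you voting for in 2020?",
#     candidate_labels=["politics", "public health", "science"],
#     multi_label=False,
# )
# print(result["labels"][0], result["scores"][0])  # best label and its score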
| 656 | 0 |
'''simple docstring'''
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ = True ,__magic_name__ = math.inf ,__magic_name__ = -math.inf ,__magic_name__ = math.inf ,__magic_name__ = -math.inf ,__magic_name__ = False ,__magic_name__ = 100 ,__magic_name__ = 0.01 ,__magic_name__ = 1 ,)-> List[Any]:
"""simple docstring"""
snake_case_ : Dict = False
snake_case_ : List[str] = search_prob
snake_case_ : List[Any] = start_temperate
snake_case_ : Union[str, Any] = []
snake_case_ : List[str] = 0
snake_case_ : List[Any] = None
while not search_end:
snake_case_ : List[str] = current_state.score()
if best_state is None or current_score > best_state.score():
snake_case_ : str = current_state
scores.append(_UpperCamelCase )
iterations += 1
snake_case_ : Tuple = None
snake_case_ : int = current_state.get_neighbors()
while (
next_state is None and neighbors
        ): # until we find a neighbor that we can move to
snake_case_ : Any = random.randint(0 ,len(_UpperCamelCase ) - 1 ) # picking a random neighbor
snake_case_ : Optional[int] = neighbors.pop(_UpperCamelCase )
snake_case_ : Dict = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
snake_case_ : int = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
snake_case_ : Dict = picked_neighbor
else:
snake_case_ : str = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
snake_case_ : Union[str, Any] = picked_neighbor
snake_case_ : str = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
snake_case_ : str = True
else:
snake_case_ : int = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(_UpperCamelCase ) ,_UpperCamelCase )
plt.xlabel("Iterations" )
plt.ylabel("Function values" )
plt.show()
return best_state
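# --- Illustrative helper (hedged): the Metropolis acceptance rule in isolation ---
# Restates the probability used above: an improving move is always taken, while
# a worsening move of size `change` (negative after the find_max/find_min sign
# flip) is accepted with probability e^(change / temperature), so high
# temperatures accept almost anything and low temperatures almost nothing.
# Added for clarity only; the annealing loop above does not call it.
def acceptance_probability(change: float, temperature: float) -> float:
    if change > 0:  # improving moves are always accepted
        return 1.0
    return math.e ** (change / temperature)  # decays as the temperature drops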
if __name__ == "__main__":
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> Tuple:
"""simple docstring"""
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
__lowerCamelCase : Tuple = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
__lowerCamelCase : Union[str, Any] = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
        '''The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
        f'''and 50 > y > - 5 found via simulated annealing: {local_min.score()}'''
)
# starting the problem with initial coordinates (12, 47)
__lowerCamelCase : Optional[Any] = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
__lowerCamelCase : str = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
        '''The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
        f'''and 50 > y > - 5 found via simulated annealing: {local_min.score()}'''
)
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> Union[str, Any]:
"""simple docstring"""
return (3 * x**2) - (6 * y)
__lowerCamelCase : Any = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
__lowerCamelCase : Tuple = simulated_annealing(prob, find_max=False, visualization=True)
print(
        '''The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: '''
        f'''{local_min.score()}'''
)
__lowerCamelCase : Tuple = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
__lowerCamelCase : Optional[Any] = simulated_annealing(prob, find_max=True, visualization=True)
print(
        '''The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: '''
        f'''{local_min.score()}'''
)
| 721 |
'''simple docstring'''
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''1.0.0a'''):
raise Exception('''requires fairseq >= 1.0.0a''')
logging.set_verbosity_info()
__lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
__lowerCamelCase : Union[str, Any] = '''Hello world! cécé herlolip'''
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> Optional[Any]:
"""simple docstring"""
snake_case_ : str = FairseqRobertaModel.from_pretrained(__magic_name__ )
roberta.eval() # disable dropout
snake_case_ : Dict = roberta.model.encoder.sentence_encoder
snake_case_ : List[str] = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings ,hidden_size=roberta.cfg.model.encoder_embed_dim ,num_hidden_layers=roberta.cfg.model.encoder_layers ,num_attention_heads=roberta.cfg.model.encoder_attention_heads ,intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim ,max_position_embeddings=514 ,type_vocab_size=1 ,layer_norm_eps=1E-5 ,)
if classification_head:
snake_case_ : List[str] = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]
print("Our RoBERTa config:" ,__magic_name__ )
snake_case_ : List[str] = XLMRobertaXLForSequenceClassification(__magic_name__ ) if classification_head else XLMRobertaXLForMaskedLM(__magic_name__ )
model.eval()
# Now let's copy all the weights.
# Embeddings
snake_case_ : List[Any] = roberta_sent_encoder.embed_tokens.weight
snake_case_ : int = roberta_sent_encoder.embed_positions.weight
snake_case_ : Union[str, Any] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
snake_case_ : Union[str, Any] = roberta_sent_encoder.layer_norm.weight
snake_case_ : str = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
snake_case_ : BertLayer = model.roberta.encoder.layer[i]
snake_case_ : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
snake_case_ : RobertaAttention = layer.attention
snake_case_ : Dict = roberta_layer.self_attn_layer_norm.weight
snake_case_ : Dict = roberta_layer.self_attn_layer_norm.bias
# self attention
snake_case_ : BertSelfAttention = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
snake_case_ : Dict = roberta_layer.self_attn.q_proj.weight
snake_case_ : Any = roberta_layer.self_attn.q_proj.bias
snake_case_ : Optional[Any] = roberta_layer.self_attn.k_proj.weight
snake_case_ : Optional[Any] = roberta_layer.self_attn.k_proj.bias
snake_case_ : Optional[int] = roberta_layer.self_attn.v_proj.weight
snake_case_ : Any = roberta_layer.self_attn.v_proj.bias
# self-attention output
snake_case_ : BertSelfOutput = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
snake_case_ : List[str] = roberta_layer.self_attn.out_proj.weight
snake_case_ : Optional[int] = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
snake_case_ : int = roberta_layer.final_layer_norm.weight
snake_case_ : Union[str, Any] = roberta_layer.final_layer_norm.bias
        # intermediate: fairseq's TransformerSentenceEncoderLayer exposes the two
        # feed-forward projections as fc1 (intermediate) and fc2 (output)
        intermediate : BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias
        # output
        bert_output : BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer
if classification_head:
snake_case_ : int = roberta.model.classification_heads["mnli"].dense.weight
snake_case_ : Union[str, Any] = roberta.model.classification_heads["mnli"].dense.bias
snake_case_ : Tuple = roberta.model.classification_heads["mnli"].out_proj.weight
snake_case_ : str = roberta.model.classification_heads["mnli"].out_proj.bias
else:
# LM Head
snake_case_ : Optional[Any] = roberta.model.encoder.lm_head.dense.weight
snake_case_ : int = roberta.model.encoder.lm_head.dense.bias
snake_case_ : Optional[Any] = roberta.model.encoder.lm_head.layer_norm.weight
snake_case_ : Optional[int] = roberta.model.encoder.lm_head.layer_norm.bias
snake_case_ : int = roberta.model.encoder.lm_head.weight
snake_case_ : List[str] = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
snake_case_ : torch.Tensor = roberta.encode(__magic_name__ ).unsqueeze(0 ) # batch of size 1
snake_case_ : Union[str, Any] = model(__magic_name__ )[0]
if classification_head:
snake_case_ : Optional[Any] = roberta.model.classification_heads["mnli"](roberta.extract_features(__magic_name__ ) )
else:
snake_case_ : List[str] = roberta.model(__magic_name__ )[0]
print(our_output.shape ,their_output.shape )
snake_case_ : str = torch.max(torch.abs(our_output - their_output ) ).item()
print(F'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
snake_case_ : Any = torch.allclose(__magic_name__ ,__magic_name__ ,atol=1E-3 )
print("Do both models output the same tensors?" ,"🔥" if success else "💩" )
if not success:
raise Exception("Something went wRoNg" )
pathlib.Path(__magic_name__ ).mkdir(parents=__magic_name__ ,exist_ok=__magic_name__ )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(__magic_name__ )
if __name__ == "__main__":
__lowerCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
        '''--roberta_checkpoint_path''', default=None, type=str, required=True, help='''Path to the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
__lowerCamelCase : Tuple = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
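# --- Illustrative invocation (hedged) ---
# A sketch of how this converter is typically run; the script filename and the
# local fairseq dump path below are hypothetical placeholders:
#
#   python convert_xlm_roberta_xl_checkpoint.py \
#       --roberta_checkpoint_path ./xlmr.xl \
#       --pytorch_dump_folder_path ./xlm-roberta-xl-converted
#
# Pass --classification_head as well when the fairseq checkpoint carries an
# "mnli" classification head instead of the LM head.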
| 656 | 0 |
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
__lowerCamelCase : int = False
class A_ (unittest.TestCase ):
"""simple docstring"""
pass
@nightly
@require_torch_gpu
class A_ (unittest.TestCase ):
"""simple docstring"""
def _A ( self :Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _A ( self :List[str] ) -> str:
'''simple docstring'''
        snake_case_ : Optional[Any] = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.float16 )
pipe.to(A__ )
pipe.set_progress_bar_config(disable=A__ )
snake_case_ : int = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
snake_case_ : Optional[int] = torch.manual_seed(0 )
snake_case_ : Any = pipe.dual_guided(
prompt="first prompt" , image=A__ , text_to_image_strength=0.7_5 , generator=A__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(A__ )
            snake_case_ : Union[str, Any] = VersatileDiffusionPipeline.from_pretrained(A__ , torch_dtype=torch.float16 )
pipe.to(A__ )
pipe.set_progress_bar_config(disable=A__ )
snake_case_ : Any = generator.manual_seed(0 )
snake_case_ : str = pipe.dual_guided(
prompt="first prompt" , image=A__ , text_to_image_strength=0.7_5 , generator=A__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def _A ( self :Tuple ) -> Optional[int]:
'''simple docstring'''
        snake_case_ : Tuple = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.float16 )
pipe.to(A__ )
pipe.set_progress_bar_config(disable=A__ )
snake_case_ : Optional[int] = "cyberpunk 2077"
snake_case_ : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
snake_case_ : Optional[Any] = torch.manual_seed(0 )
snake_case_ : List[str] = pipe.dual_guided(
prompt=A__ , image=A__ , text_to_image_strength=0.7_5 , generator=A__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" , ).images
snake_case_ : Union[str, Any] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
        snake_case_ : Any = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
snake_case_ : List[Any] = "A painting of a squirrel eating a burger "
snake_case_ : List[str] = torch.manual_seed(0 )
snake_case_ : str = pipe.text_to_image(
prompt=A__ , generator=A__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
snake_case_ : Dict = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
        snake_case_ : List[str] = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
snake_case_ : Optional[Any] = pipe.image_variation(A__ , generator=A__ , output_type="numpy" ).images
snake_case_ : Tuple = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
        snake_case_ : List[str] = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
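# --- Illustrative usage sketch (hedged, not part of the test suite) ---
# Minimal dual-guided generation mirroring the calls above. The checkpoint and
# image URL match the tests; the prompt is made up. Needs a CUDA GPU and
# network access, so it only runs when this file is executed directly.
if __name__ == "__main__":
    demo_pipe = VersatileDiffusionPipeline.from_pretrained(
        "shi-labs/versatile-diffusion", torch_dtype=torch.float16
    )
    demo_pipe.to("cuda")
    demo_image = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
    )
    demo_out = demo_pipe.dual_guided(
        prompt="a red vintage car",
        image=demo_image,
        text_to_image_strength=0.75,
        num_inference_steps=25,
        output_type="numpy",
    ).images
    print(demo_out.shape)  # expected: (1, 512, 512, 3)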
| 700 |
'''simple docstring'''
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def __UpperCAmelCase ( __magic_name__ ,__magic_name__=() ,__magic_name__=None ,__magic_name__="no" ,__magic_name__="29500" )-> Optional[int]:
"""simple docstring"""
snake_case_ : str = False
snake_case_ : int = False
if any(key.startswith("KAGGLE" ) for key in os.environ.keys() ):
snake_case_ : Any = True
elif "IPython" in sys.modules:
snake_case_ : Union[str, Any] = "google.colab" in str(sys.modules["IPython"].get_ipython() )
try:
snake_case_ : Any = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
F'''Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.''' )
if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME" ,__magic_name__ ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
"your training function. Restart your notebook and make sure no cells initializes an "
"`Accelerator`." )
if num_processes is None:
snake_case_ : Tuple = 8
snake_case_ : Optional[int] = PrepareForLaunch(__magic_name__ ,distributed_type="TPU" )
print(F'''Launching a training on {num_processes} TPU cores.''' )
xmp.spawn(__magic_name__ ,args=__magic_name__ ,nprocs=__magic_name__ ,start_method="fork" )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print("Launching training on one GPU." )
else:
print("Launching training on one CPU." )
function(*__magic_name__ )
else:
if num_processes is None:
raise ValueError(
"You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call." )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
"inside your training function. Restart your notebook and make sure no cells initializes an "
"`Accelerator`." )
if torch.cuda.is_initialized():
raise ValueError(
"To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
"using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
"function." )
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
        # process here (the other ones will be set by the launcher).
with patch_environment(
            world_size=__magic_name__ ,master_addr="127.0.0.1" ,master_port=__magic_name__ ,mixed_precision=__magic_name__ ):
snake_case_ : Optional[int] = PrepareForLaunch(__magic_name__ ,distributed_type="MULTI_GPU" )
print(F'''Launching training on {num_processes} GPUs.''' )
try:
start_processes(__magic_name__ ,args=__magic_name__ ,nprocs=__magic_name__ ,start_method="fork" )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
"CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
"This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
"Please review your imports and test them when running the `notebook_launcher()` to identify "
"which one is problematic." ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
snake_case_ : Any = "1"
print("Launching training on MPS." )
elif torch.cuda.is_available():
print("Launching training on one GPU." )
else:
print("Launching training on CPU." )
function(*__magic_name__ )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__=() ,__magic_name__=2 )-> Dict:
"""simple docstring"""
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
        # process here (the other ones will be set by the launcher).
with patch_environment(
            world_size=__magic_name__ ,master_addr="127.0.0.1" ,master_port="29500" ,accelerate_mixed_precision="no" ,accelerate_debug_rdv_file=tmp_file.name ,accelerate_use_cpu="yes" ,):
snake_case_ : Any = PrepareForLaunch(__magic_name__ ,debug=__magic_name__ )
start_processes(__magic_name__ ,args=__magic_name__ ,nprocs=__magic_name__ ,start_method="fork" )
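# --- Illustrative usage sketch (hedged) ---
# Typical call pattern for the first launcher above (`notebook_launcher` in the
# public accelerate API) from a notebook cell. The training function and its
# arguments are hypothetical placeholders; the launcher picks TPU, forked
# multi-GPU, single GPU/MPS, or CPU exactly as implemented above.
#
# def training_function(learning_rate, batch_size):
#     # build the Accelerator, model and dataloaders *inside* this function,
#     # never in a cell that runs before the launch
#     ...
#
# notebook_launcher(training_function, args=(3e-4, 16), num_processes=2)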
| 656 | 0 |
'''simple docstring'''
def one_pence() -> int:
    """simple docstring"""
    return 1
def two_pence(x: int) -> int:
    """simple docstring"""
    return 0 if x < 0 else two_pence(x - 2) + one_pence()
def five_pence(x: int) -> int:
    """simple docstring"""
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)
def ten_pence(x: int) -> int:
    """simple docstring"""
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)
def twenty_pence(x: int) -> int:
    """simple docstring"""
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)
def fifty_pence(x: int) -> int:
    """simple docstring"""
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)
def one_pound(x: int) -> int:
    """simple docstring"""
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)
def two_pound(x: int) -> int:
    """simple docstring"""
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)
def solution(x: int = 200) -> int:
    """simple docstring"""
    return two_pound(x)
if __name__ == "__main__":
print(solution(int(input().strip())))
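# --- Illustrative alternative (hedged): bottom-up dynamic programming ---
# The chained recursion above revisits overlapping subproblems; the classic
# coin-change DP below counts the same combinations in O(len(coins) * target)
# time. Added for comparison only; `solution` above stays the entry point.
def solution_dp(target: int = 200) -> int:
    coins = [1, 2, 5, 10, 20, 50, 100, 200]  # UK denominations in pence
    ways = [0] * (target + 1)
    ways[0] = 1  # one way to make 0p: use no coins
    for coin in coins:
        for amount in range(coin, target + 1):
            ways[amount] += ways[amount - coin]
    return ways[target]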
| 701 |
'''simple docstring'''
from collections import deque
from math import floor
from random import random
from time import time
class A_ :
"""simple docstring"""
def __init__( self :Dict ) -> List[str]:
'''simple docstring'''
snake_case_ : int = {}
def _A ( self :Any , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Optional[Any]=1 ) -> Any:
'''simple docstring'''
if self.graph.get(lowerCAmelCase__ ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
snake_case_ : Optional[int] = [[w, v]]
if not self.graph.get(lowerCAmelCase__ ):
snake_case_ : Dict = []
def _A ( self :List[Any] ) -> Optional[int]:
'''simple docstring'''
return list(self.graph )
def _A ( self :str , lowerCAmelCase__ :Any , lowerCAmelCase__ :int ) -> List[Any]:
'''simple docstring'''
if self.graph.get(lowerCAmelCase__ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowerCAmelCase__ )
def _A ( self :List[str] , lowerCAmelCase__ :Optional[Any]=-2 , lowerCAmelCase__ :str=-1 ) -> str:
'''simple docstring'''
if s == d:
return []
snake_case_ : str = []
snake_case_ : Optional[int] = []
if s == -2:
snake_case_ : List[Any] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Dict = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : str = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowerCAmelCase__ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Union[str, Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Optional[Any] = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return visited
def _A ( self :Tuple , lowerCAmelCase__ :int=-1 ) -> int:
'''simple docstring'''
if c == -1:
snake_case_ : Any = floor(random() * 10_000 ) + 10
for i in range(lowerCAmelCase__ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
snake_case_ : Optional[Any] = floor(random() * c ) + 1
if n != i:
self.add_pair(lowerCAmelCase__ , lowerCAmelCase__ , 1 )
def _A ( self :Tuple , lowerCAmelCase__ :Dict=-2 ) -> Dict:
'''simple docstring'''
snake_case_ : Union[str, Any] = deque()
snake_case_ : Optional[Any] = []
if s == -2:
snake_case_ : Tuple = list(self.graph )[0]
d.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
while d:
snake_case_ : Optional[int] = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _A ( self :List[str] , lowerCAmelCase__ :str ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Tuple = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def _A ( self :Any , lowerCAmelCase__ :int ) -> Optional[Any]:
'''simple docstring'''
return len(self.graph[u] )
def _A ( self :Tuple , lowerCAmelCase__ :List[str]=-2 ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : str = []
snake_case_ : str = []
if s == -2:
snake_case_ : Optional[Any] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : int = s
snake_case_ : Optional[int] = []
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : List[Any] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : List[str] = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(lowerCAmelCase__ ) != 0:
snake_case_ : int = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Union[str, Any] = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return sorted_nodes
def _A ( self :Dict ) -> Any:
'''simple docstring'''
snake_case_ : Dict = []
snake_case_ : Any = []
snake_case_ : str = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Optional[int] = -2
snake_case_ : Any = []
snake_case_ : List[Any] = s
snake_case_ : int = False
snake_case_ : Optional[int] = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : List[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case_ : Any = len(lowerCAmelCase__ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : Optional[int] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ : Optional[Any] = True
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Optional[Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : str = False
indirect_parents.append(lowerCAmelCase__ )
snake_case_ : List[str] = s
snake_case_ : Optional[int] = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return list(lowerCAmelCase__ )
def _A ( self :Tuple ) -> List[str]:
'''simple docstring'''
snake_case_ : List[Any] = []
snake_case_ : Tuple = []
snake_case_ : List[str] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : str = -2
snake_case_ : List[str] = []
snake_case_ : List[Any] = s
snake_case_ : List[str] = False
snake_case_ : Dict = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : List[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case_ : Any = len(lowerCAmelCase__ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ : Tuple = True
if len(lowerCAmelCase__ ) != 0:
snake_case_ : List[Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Optional[int] = False
indirect_parents.append(lowerCAmelCase__ )
snake_case_ : int = s
snake_case_ : Union[str, Any] = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return False
def _A ( self :Optional[int] , lowerCAmelCase__ :Optional[int]=-2 , lowerCAmelCase__ :Tuple=-1 ) -> str:
'''simple docstring'''
snake_case_ : Optional[int] = time()
self.dfs(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : Optional[Any] = time()
return end - begin
def _A ( self :Any , lowerCAmelCase__ :Tuple=-2 ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Any = time()
self.bfs(lowerCAmelCase__ )
snake_case_ : Any = time()
return end - begin
class A_ :
"""simple docstring"""
def __init__( self :Tuple ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[Any] = {}
def _A ( self :str , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Union[str, Any]=1 ) -> str:
'''simple docstring'''
if self.graph.get(lowerCAmelCase__ ):
            # if there already is an edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
snake_case_ : str = [[w, v]]
# add the other way
if self.graph.get(lowerCAmelCase__ ):
            # if there already is an edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
            # if v does not exist
snake_case_ : List[str] = [[w, u]]
def _A ( self :Dict , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
if self.graph.get(lowerCAmelCase__ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowerCAmelCase__ )
# the other way round
if self.graph.get(lowerCAmelCase__ ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(lowerCAmelCase__ )
def _A ( self :Optional[Any] , lowerCAmelCase__ :Optional[Any]=-2 , lowerCAmelCase__ :Optional[int]=-1 ) -> int:
'''simple docstring'''
if s == d:
return []
snake_case_ : Any = []
snake_case_ : Dict = []
if s == -2:
snake_case_ : Optional[int] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Tuple = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : List[str] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowerCAmelCase__ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Optional[Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : str = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return visited
def _A ( self :Optional[int] , lowerCAmelCase__ :str=-1 ) -> List[Any]:
'''simple docstring'''
if c == -1:
snake_case_ : Optional[int] = floor(random() * 10_000 ) + 10
for i in range(lowerCAmelCase__ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
snake_case_ : str = floor(random() * c ) + 1
if n != i:
self.add_pair(lowerCAmelCase__ , lowerCAmelCase__ , 1 )
def _A ( self :Any , lowerCAmelCase__ :Optional[Any]=-2 ) -> List[Any]:
'''simple docstring'''
snake_case_ : List[str] = deque()
snake_case_ : Optional[Any] = []
if s == -2:
snake_case_ : List[Any] = list(self.graph )[0]
d.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
while d:
snake_case_ : Optional[int] = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _A ( self :str , lowerCAmelCase__ :Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
return len(self.graph[u] )
def _A ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
snake_case_ : Any = []
snake_case_ : Optional[Any] = []
snake_case_ : Optional[Any] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Tuple = -2
snake_case_ : Optional[int] = []
snake_case_ : Tuple = s
snake_case_ : Optional[Any] = False
snake_case_ : Optional[int] = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : Optional[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case_ : Optional[int] = len(lowerCAmelCase__ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : Tuple = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ : Optional[int] = True
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Optional[Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Optional[int] = False
indirect_parents.append(lowerCAmelCase__ )
snake_case_ : List[Any] = s
snake_case_ : Dict = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return list(lowerCAmelCase__ )
def _A ( self :Optional[Any] ) -> Tuple:
'''simple docstring'''
snake_case_ : Optional[Any] = []
snake_case_ : int = []
snake_case_ : List[str] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Tuple = -2
snake_case_ : int = []
snake_case_ : int = s
snake_case_ : Optional[Any] = False
snake_case_ : List[Any] = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : Union[str, Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case_ : Tuple = len(lowerCAmelCase__ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : Optional[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ : Optional[Any] = True
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Tuple = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Optional[int] = False
indirect_parents.append(lowerCAmelCase__ )
snake_case_ : Union[str, Any] = s
snake_case_ : Tuple = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return False
def _A ( self :Any ) -> Tuple:
'''simple docstring'''
return list(self.graph )
def _A ( self :Optional[Any] , lowerCAmelCase__ :Tuple=-2 , lowerCAmelCase__ :Optional[int]=-1 ) -> str:
'''simple docstring'''
snake_case_ : List[str] = time()
self.dfs(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : List[Any] = time()
return end - begin
def _A ( self :Union[str, Any] , lowerCAmelCase__ :List[Any]=-2 ) -> int:
'''simple docstring'''
snake_case_ : List[str] = time()
self.bfs(lowerCAmelCase__ )
snake_case_ : Tuple = time()
return end - begin
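# --- Illustrative sketch (hedged): the cycle test above, restated standalone ---
# A compact white/gray/black DFS over a plain adjacency dict, expressing the
# same idea as the DFS-based cycle detection in the classes above: a directed
# cycle exists iff DFS meets a vertex that is still on the current path.
def has_directed_cycle(adj: dict) -> bool:
    WHITE, GRAY, BLACK = 0, 1, 2
    color = {vertex: WHITE for vertex in adj}
    def visit(vertex) -> bool:
        color[vertex] = GRAY  # on the current DFS path
        for succ in adj.get(vertex, []):
            if color.get(succ, WHITE) == GRAY:
                return True  # back edge found -> cycle
            if color.get(succ, WHITE) == WHITE and visit(succ):
                return True
        color[vertex] = BLACK  # fully explored
        return False
    return any(color[vertex] == WHITE and visit(vertex) for vertex in adj)
# e.g. has_directed_cycle({1: [2], 2: [3], 3: [1]}) is True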
| 656 | 0 |
'''simple docstring'''
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
torch_version = parse(importlib.metadata.version('''torch'''))
def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str) -> bool:
    """Compare a library version (or an already-parsed Version) against a requirement using `operation`."""
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(F'''`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}''' )
    operation_func = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation_func(library_or_version, parse(requirement_version))
def is_torch_version(operation: str, version: str) -> bool:
    """Compare the installed torch version against `version` using `operation`."""
    return compare_versions(torch_version, operation, version)
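# --- Illustrative usage (hedged) ---
# `compare_versions` accepts either an installed package name or a parsed
# Version; `is_torch_version` fixes the first argument to the installed torch.
# Assuming STR_OPERATION_TO_FUNC maps ">=", "<", "==", etc. to the matching
# operator functions:
#
#   compare_versions("numpy", ">=", "1.20")
#   is_torch_version("<", "2.0")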
| 702 |
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(R'''\s+''')
def get_hash(example):
    """Compute the md5 hash of the whitespace-stripped file content."""
    return {"hash": hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()}
def line_stats(example):
    """Compute the mean and max line length of the file content."""
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}
def alpha_stats(example):
    """Compute the fraction of alphanumeric characters in the file content."""
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}
def check_uniques(example, uniques):
"""simple docstring"""
if example["hash"] in uniques:
uniques.remove(example["hash"] )
return True
else:
return False
def is_autogenerated(example, scan_width=5):
    """Check whether the first `scan_width` lines mention autogeneration."""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}
def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Check whether the file is likely a configuration file or a unit test."""
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
    if count_config > threshold or count_test > threshold:
        return {"config_or_test": True}
    return {"config_or_test": False}
def has_no_keywords(example):
    """Check whether the file contains none of the basic Python syntax keywords."""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}
def has_few_assignments(example, minimum=4):
    """Check whether the file uses the assignment operator fewer than `minimum` times."""
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}
def char_token_ratio(example):
    """Compute the character/token ratio of the file with the global tokenizer."""
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}
def preprocess(example):
    """Chain all preprocessing steps into one function to avoid filling the cache."""
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results
def filter(example, uniques, args):
    """Filter the dataset with heuristics; note this intentionally shadows the builtin `filter`."""
    if not check_uniques(example, uniques):
        return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def compress_file(file_path):
    """Compress a file with gzip and remove the uncompressed original."""
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)
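# --- Illustrative sketch (hedged): the heuristics on a toy example ---
# The line-based filters above operate on dicts with a "content" key; the
# sample content below is made up and no dataset download is involved:
#
#   sample = {"content": "# automatically generated\nx = 1\n"}
#   is_autogenerated(sample)     # -> {"autogenerated": True}
#   has_few_assignments(sample)  # -> {"has_few_assignments": True}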
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split='''train''')
print(f'''Time to load dataset: {time.time()-t_start:.2f}''')
# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f'''Time to preprocess dataset: {time.time()-t_start:.2f}''')
# Deduplicate hashes
uniques = set(ds.unique('''hash'''))
frac = len(uniques) / len(ds)
print(f'''Fraction of duplicates: {1-frac:.2%}''')
# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args})
print(f'''Time to filter dataset: {time.time()-t_start:.2f}''')
print(f'''Size of filtered dataset: {len(ds_filter)}''')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(f'''Time to deduplicate dataset: {time.time()-t_start:.2f}''')
    print(f'''Size of deduplicated dataset: {len(ds_filter)}''')
# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
with open(output_dir / '''duplicate_clusters.json''', '''w''') as f:
json.dump(duplicate_clusters, f)
data_dir = output_dir / '''data'''
data_dir.mkdir(exist_ok=True)
t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f'''file-{file_number+1:012}.json''')
    end_index = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(f'''Time to save dataset: {time.time()-t_start:.2f}''')
| 656 | 0 |
'''simple docstring'''
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def __UpperCAmelCase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )-> Dict:
"""simple docstring"""
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), F'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), F'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def __UpperCAmelCase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=True )-> Dict:
"""simple docstring"""
model.train()
snake_case_ : Optional[int] = model(lowerCAmelCase_ )
snake_case_ : Any = F.mse_loss(lowerCAmelCase_ , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(lowerCAmelCase_ )
def __UpperCAmelCase ( __magic_name__ , __magic_name__=False )-> Union[str, Any]:
"""simple docstring"""
set_seed(42 )
snake_case_ : Optional[Any] = RegressionModel()
snake_case_ : Dict = deepcopy(lowerCAmelCase_ )
snake_case_ : Union[str, Any] = RegressionDataset(length=80 )
snake_case_ : List[Any] = DataLoader(lowerCAmelCase_ , batch_size=16 )
model.to(accelerator.device )
if sched:
snake_case_ : List[str] = AdamW(params=model.parameters() , lr=1E-3 )
snake_case_ : str = AdamW(params=ddp_model.parameters() , lr=1E-3 )
        snake_case_ : str = LambdaLR(lowerCAmelCase_ , lr_lambda=lambda epoch : epoch**0.65 )
        snake_case_ : Any = LambdaLR(lowerCAmelCase_ , lr_lambda=lambda epoch : epoch**0.65 )
# Make a copy of `model`
if sched:
snake_case_ : str = accelerator.prepare(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
else:
snake_case_ : str = accelerator.prepare(lowerCAmelCase_ , lowerCAmelCase_ )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def __UpperCAmelCase ( __magic_name__ )-> str:
"""simple docstring"""
snake_case_ : Dict = get_training_setup(lowerCAmelCase_ )
# Use a single batch
snake_case_ : Any = next(iter(lowerCAmelCase_ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
snake_case_ : int = accelerator.gather((ddp_input, ddp_target) )
snake_case_ : Dict = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(lowerCAmelCase_ ):
step_model(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
else:
# Sync grads
step_model(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
snake_case_ : Dict = ddp_input[torch.randperm(len(lowerCAmelCase_ ) )]
def __UpperCAmelCase ( __magic_name__ )-> Union[str, Any]:
"""simple docstring"""
snake_case_ : int = get_training_setup(lowerCAmelCase_ )
# Use a single batch
snake_case_ : int = next(iter(lowerCAmelCase_ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
snake_case_ : Optional[int] = accelerator.gather((ddp_input, ddp_target) )
snake_case_ : Dict = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(lowerCAmelCase_ ):
step_model(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
else:
# Sync grads
step_model(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
snake_case_ : List[Any] = ddp_input[torch.randperm(len(lowerCAmelCase_ ) )]
def __UpperCAmelCase ( __magic_name__=False , __magic_name__=False )-> int:
"""simple docstring"""
snake_case_ : Any = Accelerator(
split_batches=lowerCAmelCase_ , dispatch_batches=lowerCAmelCase_ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
snake_case_ : Union[str, Any] = get_training_setup(lowerCAmelCase_ )
for iteration, batch in enumerate(lowerCAmelCase_ ):
snake_case_ : Union[str, Any] = batch.values()
# Gather the distributed inputs and targs for the base model
snake_case_ : List[Any] = accelerator.gather((ddp_input, ddp_target) )
snake_case_ : Dict = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(lowerCAmelCase_ ):
step_model(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(lowerCAmelCase_ ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
snake_case_ : str = ddp_input[torch.randperm(len(lowerCAmelCase_ ) )]
GradientState._reset_state()
def __UpperCAmelCase ( __magic_name__=False , __magic_name__=False )-> Dict:
"""simple docstring"""
snake_case_ : Union[str, Any] = Accelerator(
split_batches=lowerCAmelCase_ , dispatch_batches=lowerCAmelCase_ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
snake_case_ : int = get_training_setup(lowerCAmelCase_ , lowerCAmelCase_ )
for iteration, batch in enumerate(lowerCAmelCase_ ):
snake_case_ : Dict = batch.values()
# Gather the distributed inputs and targs for the base model
snake_case_ : Any = accelerator.gather((ddp_input, ddp_target) )
snake_case_ : List[Any] = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(lowerCAmelCase_ )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(lowerCAmelCase_ ):
step_model(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), F'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n'''
snake_case_ : Optional[Any] = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(lowerCAmelCase_ ))
if accelerator.num_processes > 1:
check_model_parameters(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
GradientState._reset_state()
def __UpperCAmelCase ( )-> str:
"""simple docstring"""
snake_case_ : int = Accelerator()
snake_case_ : Optional[int] = RegressionDataset(length=80 )
snake_case_ : Tuple = DataLoader(lowerCAmelCase_ , batch_size=16 )
snake_case_ : Optional[int] = RegressionDataset(length=96 )
snake_case_ : Tuple = DataLoader(lowerCAmelCase_ , batch_size=16 )
snake_case_ : Optional[int] = accelerator.prepare(lowerCAmelCase_ , lowerCAmelCase_ )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(lowerCAmelCase_ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(lowerCAmelCase_ )
if iteration < len(lowerCAmelCase_ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(lowerCAmelCase_ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(lowerCAmelCase_ )
if batch_num < len(lowerCAmelCase_ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def __UpperCAmelCase ( )-> int:
"""simple docstring"""
snake_case_ : Dict = Accelerator()
snake_case_ : Optional[Any] = accelerator.state
if state.local_process_index == 0:
print("**Test `accumulate` gradient accumulation with dataloader break**" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("**Test NOOP `no_sync` context manager**" )
test_noop_sync(lowerCAmelCase_ )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("**Test Distributed `no_sync` context manager**" )
test_distributed_sync(lowerCAmelCase_ )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation, " , F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation(lowerCAmelCase_ , lowerCAmelCase_ )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("<" , "2.0" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation_with_opt_and_scheduler(lowerCAmelCase_ , lowerCAmelCase_ )
def __UpperCAmelCase ( __magic_name__ )-> List[Any]:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
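# --- Illustrative usage sketch (hedged) ---
# The pattern these tests exercise, reduced to its core. Model, optimizer,
# dataloader and loss_fn construction are elided placeholders;
# `gradient_accumulation_steps=2` matches the tests above.
#
# accelerator = Accelerator(gradient_accumulation_steps=2)
# model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
# for batch, target in dataloader:
#     with accelerator.accumulate(model):
#         loss = loss_fn(model(batch), target)
#         accelerator.backward(loss)
#         optimizer.step()
#         optimizer.zero_grad()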
| 703 |
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class A_ (unittest.TestCase ):
"""simple docstring"""
def _A ( self :str ) -> Union[str, Any]:
'''simple docstring'''
        model = torch.nn.Linear(10 , 10 )
        optimizer = torch.optim.SGD(model.parameters() , 0.1 )
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer )
        try:
            pickle.loads(pickle.dumps(optimizer ) )
except Exception as e:
self.fail(F'''Accelerated optimizer pickling failed with {e}''' )
AcceleratorState._reset_state()
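# --- Illustrative sketch (hedged): what this test guards against ---
# `accelerator.prepare` wraps the optimizer (in accelerate's AcceleratedOptimizer),
# and checkpointing utilities may pickle that wrapper, so it must survive a
# round trip:
#
#   payload = pickle.dumps(prepared_optimizer)
#   restored = pickle.loads(payload)  # should not raise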
| 656 | 0 |
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
__lowerCamelCase : int = logging.get_logger(__name__)
class OwlViTFeatureExtractor(OwlViTImageProcessor):
    """simple docstring"""
    def __init__(self, *args, **kwargs) -> None:
        '''simple docstring'''
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead." , FutureWarning , )
        super().__init__(*args, **kwargs)
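# --- Illustrative migration (hedged) ---
# The forward-compatible replacement for the deprecated class above; the
# checkpoint name is one common choice and `image` is a placeholder PIL image:
#
#   from transformers import OwlViTImageProcessor
#
#   image_processor = OwlViTImageProcessor.from_pretrained("google/owlvit-base-patch32")
#   inputs = image_processor(images=image, return_tensors="pt")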
| 704 |
'''simple docstring'''
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__lowerCamelCase : Any = '''src/transformers'''
# This is to make sure the transformers module imported is the one in the repo.
__lowerCamelCase : List[str] = direct_transformers_import(PATH_TO_TRANSFORMERS)
__lowerCamelCase : Optional[Any] = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
__lowerCamelCase : Union[str, Any] = re.compile(R'''\[(.+?)\]\((https://huggingface\.co/.+?)\)''')
__lowerCamelCase : Any = {
'''DecisionTransformerConfig''',
'''EncoderDecoderConfig''',
'''MusicgenConfig''',
'''RagConfig''',
'''SpeechEncoderDecoderConfig''',
'''TimmBackboneConfig''',
'''VisionEncoderDecoderConfig''',
'''VisionTextDualEncoderConfig''',
'''LlamaConfig''',
}
def __UpperCAmelCase ( __magic_name__ )-> List[Any]:
"""simple docstring"""
snake_case_ : Tuple = None
# source code of `config_class`
snake_case_ : List[Any] = inspect.getsource(__magic_name__ )
snake_case_ : List[str] = _re_checkpoint.findall(__magic_name__ )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith("/" ):
snake_case_ : Optional[Any] = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
snake_case_ : str = F'''https://huggingface.co/{ckpt_name}'''
if ckpt_link == ckpt_link_from_name:
snake_case_ : Dict = ckpt_name
break
return checkpoint
def __UpperCAmelCase ( )-> Dict:
"""simple docstring"""
snake_case_ : Optional[int] = []
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
snake_case_ : str = get_checkpoint_from_config_class(__magic_name__ )
snake_case_ : Union[str, Any] = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(__magic_name__ )
if len(__magic_name__ ) > 0:
snake_case_ : Tuple = "\n".join(sorted(__magic_name__ ) )
raise ValueError(F'''The following configurations don\'t contain any valid checkpoint:\n{message}''' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 656 | 0 |
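To see what the checkpoint regex in the script above extracts, here is a small demo run on a sample docstring fragment:

import re

_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

doc = "See [bert-base-uncased](https://huggingface.co/bert-base-uncased)."
print(_re_checkpoint.findall(doc))
# [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]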
'''simple docstring'''
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class A_ :
"""simple docstring"""
def __init__( self :Dict , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[int]=14 , lowerCAmelCase__ :Dict=7 , lowerCAmelCase__ :str=True , lowerCAmelCase__ :int=True , lowerCAmelCase__ :Optional[int]=True , lowerCAmelCase__ :Optional[int]=True , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :Union[str, Any]=99 , lowerCAmelCase__ :Dict=32 , lowerCAmelCase__ :List[Any]=5 , lowerCAmelCase__ :Tuple=4 , lowerCAmelCase__ :List[str]=37 , lowerCAmelCase__ :str="gelu" , lowerCAmelCase__ :List[Any]=0.1 , lowerCAmelCase__ :int=0.1 , lowerCAmelCase__ :List[Any]=512 , lowerCAmelCase__ :Any=16 , lowerCAmelCase__ :int=2 , lowerCAmelCase__ :Union[str, Any]=0.0_2 , lowerCAmelCase__ :Optional[int]=3 , lowerCAmelCase__ :Any=4 , lowerCAmelCase__ :str=None , ) -> List[str]:
'''simple docstring'''
snake_case_ : Any = parent
snake_case_ : List[str] = batch_size
snake_case_ : Union[str, Any] = seq_length
snake_case_ : int = is_training
snake_case_ : Optional[int] = use_token_type_ids
snake_case_ : Optional[int] = use_input_mask
snake_case_ : Dict = use_labels
snake_case_ : Any = use_mc_token_ids
snake_case_ : str = vocab_size
snake_case_ : Optional[int] = hidden_size
snake_case_ : str = num_hidden_layers
snake_case_ : Union[str, Any] = num_attention_heads
snake_case_ : Optional[Any] = intermediate_size
snake_case_ : Dict = hidden_act
snake_case_ : List[str] = hidden_dropout_prob
snake_case_ : Optional[Any] = attention_probs_dropout_prob
snake_case_ : Optional[int] = max_position_embeddings
snake_case_ : Tuple = type_vocab_size
snake_case_ : int = type_sequence_label_size
snake_case_ : Any = initializer_range
snake_case_ : List[str] = num_labels
snake_case_ : Any = num_choices
snake_case_ : Union[str, Any] = scope
snake_case_ : int = self.vocab_size - 1
def _A ( self :int ) -> List[str]:
'''simple docstring'''
snake_case_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ : str = None
if self.use_input_mask:
snake_case_ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ : int = None
if self.use_token_type_ids:
snake_case_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ : Tuple = None
if self.use_mc_token_ids:
snake_case_ : Tuple = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
snake_case_ : List[str] = None
snake_case_ : Any = None
snake_case_ : List[Any] = None
if self.use_labels:
snake_case_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case_ : Dict = ids_tensor([self.batch_size] , self.num_choices )
snake_case_ : Tuple = self.get_config()
snake_case_ : List[str] = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _A ( self :Any ) -> Dict:
'''simple docstring'''
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def _A ( self :str , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :int , lowerCAmelCase__ :Any , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Dict , *lowerCAmelCase__ :List[str] ) -> int:
'''simple docstring'''
snake_case_ : Dict = CTRLModel(config=__snake_case )
model.to(__snake_case )
model.eval()
model(__snake_case , token_type_ids=__snake_case , head_mask=__snake_case )
model(__snake_case , token_type_ids=__snake_case )
snake_case_ : Dict = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def _A ( self :str , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Dict , *lowerCAmelCase__ :str ) -> List[str]:
'''simple docstring'''
snake_case_ : List[str] = CTRLLMHeadModel(__snake_case )
model.to(__snake_case )
model.eval()
snake_case_ : Optional[Any] = model(__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _A ( self :List[Any] ) -> Any:
'''simple docstring'''
snake_case_ : List[str] = self.prepare_config_and_inputs()
(
snake_case_
) : Optional[int] = config_and_inputs
snake_case_ : str = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''head_mask''': head_mask}
return config, inputs_dict
def _A ( self :int , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :List[Any] , *lowerCAmelCase__ :str ) -> int:
'''simple docstring'''
snake_case_ : Tuple = self.num_labels
snake_case_ : Optional[int] = CTRLForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
snake_case_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ : List[str] = model(__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class A_ (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
a__ = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
a__ = (CTRLLMHeadModel,) if is_torch_available() else ()
a__ = (
{
'''feature-extraction''': CTRLModel,
'''text-classification''': CTRLForSequenceClassification,
'''text-generation''': CTRLLMHeadModel,
'''zero-shot''': CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
a__ = True
a__ = False
a__ = False
def _A ( self :str , lowerCAmelCase__ :int , lowerCAmelCase__ :int , lowerCAmelCase__ :str , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Optional[int] ) -> Tuple:
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def _A ( self :str ) -> Any:
'''simple docstring'''
snake_case_ : List[str] = CTRLModelTester(self )
snake_case_ : Tuple = ConfigTester(self , config_class=__snake_case , n_embd=37 )
def _A ( self :str ) -> str:
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def _A ( self :List[Any] ) -> Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _A ( self :List[str] ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*__snake_case )
def _A ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
snake_case_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*__snake_case )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _A ( self :Union[str, Any] ) -> List[str]:
'''simple docstring'''
pass
@slow
def _A ( self :Union[str, Any] ) -> List[str]:
'''simple docstring'''
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ : Dict = CTRLModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
@unittest.skip("The model doesn\'t support left padding" ) # and it's not used enough to be worth fixing :)
def _A ( self :List[str] ) -> str:
'''simple docstring'''
pass
@require_torch
class A_ (unittest.TestCase ):
"""simple docstring"""
def _A ( self :Dict ) -> List[Any]:
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def _A ( self :Union[str, Any] ) -> List[Any]:
'''simple docstring'''
snake_case_ : Tuple = CTRLLMHeadModel.from_pretrained("ctrl" )
model.to(__snake_case )
snake_case_ : Any = torch.tensor(
[[11_859, 0, 1_611, 8]] , dtype=torch.long , device=__snake_case ) # Legal the president is
snake_case_ : List[str] = [
11_859,
0,
1_611,
8,
5,
150,
26_449,
2,
19,
348,
469,
3,
2_595,
48,
20_740,
246_533,
246_533,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
snake_case_ : int = model.generate(__snake_case , do_sample=__snake_case )
self.assertListEqual(output_ids[0].tolist() , __snake_case )
| 705 |
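A usage sketch of the greedy-decoding check above; "Legal" is one of CTRL's control codes, and the checkpoint download is large (several GB).

from transformers import CTRLLMHeadModel, CTRLTokenizer

tokenizer = CTRLTokenizer.from_pretrained("ctrl")
model = CTRLLMHeadModel.from_pretrained("ctrl")

# Greedy decoding matches the do_sample=False call in the test above.
input_ids = tokenizer("Legal the president is", return_tensors="pt").input_ids
output_ids = model.generate(input_ids, do_sample=False)
print(tokenizer.decode(output_ids[0]))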
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : List[str] = logging.get_logger(__name__)
__lowerCamelCase : int = {
'''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class A_ (a_ ):
"""simple docstring"""
a__ = '''cvt'''
def __init__( self :List[Any] , lowerCAmelCase__ :Optional[int]=3 , lowerCAmelCase__ :Any=[7, 3, 3] , lowerCAmelCase__ :Dict=[4, 2, 2] , lowerCAmelCase__ :Union[str, Any]=[2, 1, 1] , lowerCAmelCase__ :Any=[64, 192, 384] , lowerCAmelCase__ :List[str]=[1, 3, 6] , lowerCAmelCase__ :str=[1, 2, 10] , lowerCAmelCase__ :Any=[4.0, 4.0, 4.0] , lowerCAmelCase__ :int=[0.0, 0.0, 0.0] , lowerCAmelCase__ :Optional[Any]=[0.0, 0.0, 0.0] , lowerCAmelCase__ :Dict=[0.0, 0.0, 0.1] , lowerCAmelCase__ :List[Any]=[True, True, True] , lowerCAmelCase__ :List[Any]=[False, False, True] , lowerCAmelCase__ :Dict=["dw_bn", "dw_bn", "dw_bn"] , lowerCAmelCase__ :Any=[3, 3, 3] , lowerCAmelCase__ :Tuple=[1, 1, 1] , lowerCAmelCase__ :Optional[int]=[2, 2, 2] , lowerCAmelCase__ :Union[str, Any]=[1, 1, 1] , lowerCAmelCase__ :Any=[1, 1, 1] , lowerCAmelCase__ :List[str]=0.0_2 , lowerCAmelCase__ :Dict=1E-1_2 , **lowerCAmelCase__ :Optional[Any] , ) -> str:
'''simple docstring'''
super().__init__(**lowerCAmelCase__ )
snake_case_ : int = num_channels
snake_case_ : int = patch_sizes
snake_case_ : Optional[Any] = patch_stride
snake_case_ : Dict = patch_padding
snake_case_ : Tuple = embed_dim
snake_case_ : Optional[int] = num_heads
snake_case_ : Union[str, Any] = depth
snake_case_ : Optional[int] = mlp_ratio
snake_case_ : Tuple = attention_drop_rate
snake_case_ : str = drop_rate
snake_case_ : Tuple = drop_path_rate
snake_case_ : Any = qkv_bias
snake_case_ : Union[str, Any] = cls_token
snake_case_ : int = qkv_projection_method
snake_case_ : Any = kernel_qkv
snake_case_ : Union[str, Any] = padding_kv
snake_case_ : str = stride_kv
snake_case_ : Dict = padding_q
snake_case_ : Tuple = stride_q
snake_case_ : Any = initializer_range
snake_case_ : Any = layer_norm_eps
| 656 | 0 |
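A quick sketch instantiating the configuration above; the keyword names are assumed from the attribute assignments shown, since the obfuscated signature hides them.

from transformers import CvtConfig, CvtModel

config = CvtConfig(depth=[1, 2, 10], embed_dim=[64, 192, 384])
model = CvtModel(config)  # randomly initialized; nothing is downloaded
print(config.num_heads)   # [1, 3, 6] by default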
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class A_ (unittest.TestCase ):
"""simple docstring"""
def _A ( self :str ) -> Union[str, Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _A ( self :str ) -> List[Any]:
'''simple docstring'''
snake_case_ : Any = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" )
snake_case_ : str = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
sd_pipe.set_scheduler("sample_euler" )
snake_case_ : Any = "A painting of a squirrel eating a burger"
snake_case_ : Optional[int] = torch.manual_seed(0 )
snake_case_ : Tuple = sd_pipe([prompt] , generator=lowerCAmelCase__ , guidance_scale=9.0 , num_inference_steps=20 , output_type="np" )
snake_case_ : Dict = output.images
snake_case_ : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
snake_case_ : List[Any] = np.array([0.0_4_4_7, 0.0_4_9_2, 0.0_4_6_8, 0.0_4_0_8, 0.0_3_8_3, 0.0_4_0_8, 0.0_3_5_4, 0.0_3_8_0, 0.0_3_3_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _A ( self :str ) -> List[Any]:
'''simple docstring'''
snake_case_ : List[Any] = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
snake_case_ : List[str] = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
sd_pipe.set_scheduler("sample_euler" )
snake_case_ : Any = "A painting of a squirrel eating a burger"
snake_case_ : Optional[Any] = torch.manual_seed(0 )
snake_case_ : Any = sd_pipe([prompt] , generator=lowerCAmelCase__ , guidance_scale=9.0 , num_inference_steps=20 , output_type="np" )
snake_case_ : Tuple = output.images
snake_case_ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
snake_case_ : Any = np.array([0.1_2_3_7, 0.1_3_2_0, 0.1_4_3_8, 0.1_3_5_9, 0.1_3_9_0, 0.1_1_3_2, 0.1_2_7_7, 0.1_1_7_5, 0.1_1_1_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1
def _A ( self :List[Any] ) -> List[str]:
'''simple docstring'''
snake_case_ : str = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
snake_case_ : List[Any] = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
sd_pipe.set_scheduler("sample_dpmpp_2m" )
snake_case_ : str = "A painting of a squirrel eating a burger"
snake_case_ : Any = torch.manual_seed(0 )
snake_case_ : Optional[Any] = sd_pipe(
[prompt] , generator=lowerCAmelCase__ , guidance_scale=7.5 , num_inference_steps=15 , output_type="np" , use_karras_sigmas=lowerCAmelCase__ , )
snake_case_ : Optional[int] = output.images
snake_case_ : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
snake_case_ : int = np.array(
[0.1_1_3_8_1_6_8_9, 0.1_2_1_1_2_9_2_1, 0.1_3_8_9_4_5_7, 0.1_2_5_4_9_6_0_6, 0.1_2_4_4_9_6_4, 0.1_0_8_3_1_5_1_7, 0.1_1_5_6_2_8_6_6, 0.1_0_8_6_7_8_1_6, 0.1_0_4_9_9_0_4_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 706 |
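A usage sketch mirroring the k-diffusion tests above; this requires a CUDA GPU and downloads model weights on the first run.

import torch
from diffusers import StableDiffusionKDiffusionPipeline

pipe = StableDiffusionKDiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1-base"
).to("cuda")
pipe.set_scheduler("sample_dpmpp_2m")  # pick any k-diffusion sampler by name

generator = torch.manual_seed(0)
image = pipe(
    "A painting of a squirrel eating a burger",
    generator=generator,
    num_inference_steps=15,
    use_karras_sigmas=True,
).images[0]
image.save("squirrel.png")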
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
__lowerCamelCase : str = '''\
@inproceedings{snover-etal-2006-study,
title = "A Study of Translation Edit Rate with Targeted Human Annotation",
author = "Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John",
booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
month = aug # " 8-12",
year = "2006",
address = "Cambridge, Massachusetts, USA",
publisher = "Association for Machine Translation in the Americas",
url = "https://aclanthology.org/2006.amta-papers.25",
pages = "223--231",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
__lowerCamelCase : Dict = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists needs to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''
__lowerCamelCase : int = '''
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
ignore_punct (boolean): If `True`, removes punctuation before scoring. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
\'score\' (float): TER score (num_edits / sum_ref_lengths * 100)
\'num_edits\' (int): The cumulative number of edits
\'ref_length\' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}
Example 2:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}
Example 3:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}
Example 4:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}
Example 5:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ (datasets.Metric ):
"""simple docstring"""
def _A ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
if version.parse(scb.__version__ ) < version.parse("1.4.12" ):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
"You can install it with `pip install \"sacrebleu>=1.4.12\"`." )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="http://www.cs.umd.edu/~snover/tercom/" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"] , reference_urls=[
"https://github.com/jhclark/tercom",
] , )
def _A ( self :Any , lowerCAmelCase__ :Any , lowerCAmelCase__ :str , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :bool = False , ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : List[str] = len(references[0] )
if any(len(lowerCAmelCase__ ) != references_per_prediction for refs in references ):
raise ValueError("Sacrebleu requires the same number of references for each prediction" )
snake_case_ : List[str] = [[refs[i] for refs in references] for i in range(lowerCAmelCase__ )]
snake_case_ : List[str] = TER(
normalized=lowerCAmelCase__ , no_punct=lowerCAmelCase__ , asian_support=lowerCAmelCase__ , case_sensitive=lowerCAmelCase__ , )
snake_case_ : Any = sb_ter.corpus_score(lowerCAmelCase__ , lowerCAmelCase__ )
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 656 | 0 |
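The metric above delegates to sacrebleu; a direct-usage sketch, noting the transposed reference layout sacrebleu expects (one inner list per reference stream, not per sentence):

from sacrebleu import TER

hypotheses = ["does this sentence match??", "what about this sentence?"]
references = [
    ["does this sentence match", "wHaT aBoUt ThIs SeNtEnCe?"],     # stream 1
    ["does this sentence match!?!", "wHaT aBoUt ThIs SeNtEnCe?"],  # stream 2
]
ter = TER(case_sensitive=True)
print(ter.corpus_score(hypotheses, references))  # score of 62.5, as in Example 2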
'''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from numpy import array
def __UpperCAmelCase ( __magic_name__ )-> list[list[float]]:
"""simple docstring"""
snake_case_ : Dict = Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
if len(lowercase_ ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
# Calculate the determinant of the matrix
snake_case_ : Optional[int] = float(
d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
if determinant == 0:
raise ValueError("This matrix has no inverse." )
# Creates a copy of the matrix with swapped positions of the elements
snake_case_ : List[Any] = [[0.0, 0.0], [0.0, 0.0]]
snake_case_ : int = matrix[1][1], matrix[0][0]
snake_case_ : List[str] = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
[(float(d(lowercase_ ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
elif (
len(lowercase_ ) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
snake_case_ : Optional[int] = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError("This matrix has no inverse." )
# Creating cofactor matrix
snake_case_ : Optional[Any] = [
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
snake_case_ : Optional[Any] = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
d(matrix[1][2] ) * d(matrix[2][1] )
)
snake_case_ : Optional[int] = -(
(d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
)
snake_case_ : str = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
d(matrix[1][1] ) * d(matrix[2][0] )
)
snake_case_ : int = -(
(d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
)
snake_case_ : Optional[int] = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
d(matrix[0][2] ) * d(matrix[2][0] )
)
snake_case_ : List[str] = -(
(d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
)
snake_case_ : Any = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
d(matrix[0][2] ) * d(matrix[1][1] )
)
snake_case_ : Union[str, Any] = -(
(d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
)
snake_case_ : List[str] = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
d(matrix[0][1] ) * d(matrix[1][0] )
)
# Transpose the cofactor matrix (Adjoint matrix)
snake_case_ : str = array(lowercase_ )
for i in range(3 ):
for j in range(3 ):
snake_case_ : List[str] = cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
snake_case_ : List[str] = array(lowercase_ )
for i in range(3 ):
for j in range(3 ):
inverse_matrix[i][j] /= d(lowercase_ )
# Calculate the inverse of the matrix
return [[float(d(lowercase_ ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError("Please provide a matrix of size 2x2 or 3x3." )
| 707 |
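A quick independent check of the 2x2 branch above, assuming numpy is available: swap the diagonal, negate the off-diagonal, divide by the determinant, and compare against numpy's own inverse.

from numpy import allclose, array, identity
from numpy.linalg import inv

a = array([[4.0, 7.0], [2.0, 6.0]])
det = a[0, 0] * a[1, 1] - a[0, 1] * a[1, 0]
manual_inv = array([[a[1, 1], -a[0, 1]], [-a[1, 0], a[0, 0]]]) / det
assert allclose(manual_inv, inv(a))
assert allclose(a @ manual_inv, identity(2))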
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def __UpperCAmelCase ( )-> int:
"""simple docstring"""
snake_case_ : Any = {
"repo_name": ["test_repo1", "test_repo2", "test_repo3"],
"path": ["test_1.py", "test_2.py", "unit_test.py"],
"content": ["a " * 20, "a " * 30, "b " * 7],
}
snake_case_ : int = Dataset.from_dict(__magic_name__ )
return dataset
class A_ (a_ ):
"""simple docstring"""
def _A ( self :List[str] ) -> str:
'''simple docstring'''
snake_case_ : Union[str, Any] = get_dataset()
snake_case_ : Optional[int] = make_duplicate_clusters(lowerCAmelCase__ , 0.8_5 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def _A ( self :Union[str, Any] ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[int] = get_dataset()
snake_case_, snake_case_ : List[Any] = deduplicate_dataset(lowerCAmelCase__ )
self.assertEqual(len(lowerCAmelCase__ ) , 2 )
print(lowerCAmelCase__ )
self.assertEqual(duplicate_clusters[0][0]["copies"] , 2 )
self.assertEqual(duplicate_clusters[0][0]["is_extreme"] , lowerCAmelCase__ )
| 656 | 0 |
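A minimal sketch of the MinHash mechanism behind `make_duplicate_clusters`, assuming the `datasketch` package (which the deduplication script builds on):

from datasketch import MinHash

def minhash(text, num_perm=128):
    m = MinHash(num_perm=num_perm)
    for token in set(text.split()):
        m.update(token.encode("utf8"))
    return m

a = minhash("a " * 20)  # same token set as "a " * 30
b = minhash("a " * 30)
c = minhash("b " * 7)
print(a.jaccard(b))  # 1.0 -> flagged as duplicates at threshold 0.85
print(a.jaccard(c))  # 0.0 -> distinct documents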