import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_yolos_config(yolos_name: str) -> YolosConfig:
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]

    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def read_in_q_k_v(state_dict: dict, config: YolosConfig, base_model: bool = False):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def rename_key(name: str) -> str:
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")

    return name


def convert_state_dict(orig_state_dict: dict, model: YolosForObjectDetection) -> dict:
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def prepare_img() -> Image.Image:
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_yolos_checkpoint(
    yolos_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False
):
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]]
        )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]]
        )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]]
        )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]]
        )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]]
        )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--yolos_name",
        default="yolos_s_200_pre",
        type=str,
        help=(
            "Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
            " 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
        ),
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
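# Example invocation, assuming this script is saved as convert_yolos_to_pytorch.py
# (checkpoint and output paths below are placeholders):
#
#   python convert_yolos_to_pytorch.py --yolos_name yolos_s_200_pre \
#       --checkpoint_path /path/to/yolos_s_200_pre.pth \
#       --pytorch_dump_folder_path /path/to/output_dir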
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int) -> np.ndarray:
    # prepare kernel
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)
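# For cross-checking, OpenCV ships its own Gabor kernel generator. A minimal
# comparison sketch (note that cv2.getGaborKernel expects theta and psi in radians,
# while gabor_filter_kernel above takes theta in degrees):
#
#   from cv2 import getGaborKernel
#   reference = getGaborKernel((11, 11), sigma=8, theta=0, lambd=10, gamma=0, psi=0)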
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4096,
    "google/bigbird-roberta-large": 4096,
    "google/bigbird-base-trivia-itc": 4096,
}


class BigBirdTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            mask_token=mask_token,
            cls_token=cls_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r" (\[(MASK|SEP)\])", r"\1", " ".join(sub_texts))
        else:
            text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
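# A minimal usage sketch (assuming this class is exported as `BigBirdTokenizer` and
# the public `google/bigbird-roberta-base` checkpoint is available):
#
#   tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
#   encoded = tokenizer("Hello world")  # [CLS] ... [SEP] added automatically
#   text = tokenizer.decode(encoded["input_ids"], skip_special_tokens=True)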
import unittest

from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM


@require_torch
class TrOCRStandaloneDecoderModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels

        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.num_hidden_layers = decoder_layers
        self.decoder_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings

        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(
            vocab_size=self.vocab_size,
            d_model=self.d_model,
            decoder_layers=self.decoder_layers,
            decoder_ffn_dim=self.decoder_ffn_dim,
            decoder_attention_heads=self.decoder_attention_heads,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            use_cache=self.use_cache,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
            max_position_embeddings=self.max_position_embeddings,
        )

        return (config, input_ids, attention_mask, lm_labels)

    def create_and_check_decoder_model_past(self, config, input_ids, attention_mask, lm_labels):
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]

        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs["past_key_values"]

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    # not implemented currently
    def test_inputs_embeds(self):
        pass

    def test_save_load_fast_init_from_base(self):
        pass

    def test_save_load_fast_init_to_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    # decoder cannot keep gradients
    def test_retain_grad_hidden_states_attentions(self):
        return

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
__lowerCamelCase = """
# Transformers 설치 방법
! pip install transformers datasets
# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
__lowerCamelCase = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
__lowerCamelCase = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
from pathlib import Path
from typing import List

from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image


authorized_types = ["text", "image", "audio"]


def create_inputs(input_types: List[str]):
    inputs = []

    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")

    return inputs


def output_types(outputs: List):
    output_types = []

    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")

    return output_types


@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_types_inputs(self):
        inputs = create_inputs(self.tool.inputs)

        _inputs = []

        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*_inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
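# A sketch of how the mixin is meant to be consumed; the tool name here is an
# illustrative assumption, not taken from this file:
#
#   import unittest
#   from transformers import load_tool
#
#   class TranslationToolTest(ToolTesterMixin, unittest.TestCase):
#       def setUp(self):
#           self.tool = load_tool("translation")
#           self.tool.setup()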
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_xlm_roberta_xl": [
        "XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XLMRobertaXLConfig",
        "XLMRobertaXLOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlm_roberta_xl"] = [
        "XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMRobertaXLForCausalLM",
        "XLMRobertaXLForMaskedLM",
        "XLMRobertaXLForMultipleChoice",
        "XLMRobertaXLForQuestionAnswering",
        "XLMRobertaXLForSequenceClassification",
        "XLMRobertaXLForTokenClassification",
        "XLMRobertaXLModel",
        "XLMRobertaXLPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xlm_roberta_xl import (
        XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XLMRobertaXLConfig,
        XLMRobertaXLOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm_roberta_xl import (
            XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMRobertaXLForCausalLM,
            XLMRobertaXLForMaskedLM,
            XLMRobertaXLForMultipleChoice,
            XLMRobertaXLForQuestionAnswering,
            XLMRobertaXLForSequenceClassification,
            XLMRobertaXLForTokenClassification,
            XLMRobertaXLModel,
            XLMRobertaXLPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
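# A rough sketch of what the `_LazyModule` registration above buys (illustrative
# only; the real implementation lives in transformers.utils). Heavy torch imports
# are only triggered when an attribute is first accessed:
#
#   import importlib, types
#
#   class LazyDemo(types.ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           # map attribute name -> submodule that defines it
#           self._attr_to_module = {
#               attr: mod for mod, attrs in import_structure.items() for attr in attrs
#           }
#
#       def __getattr__(self, attr):
#           submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
#           return getattr(submodule, attr)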
def hamming_distance(string1: str, string2: str) -> int:
    """
    >>> hamming_distance("karolin", "kathrin")
    3
    >>> hamming_distance("00000", "11111")
    5
    """
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")

    count = 0

    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1

    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import unittest

import numpy as np
import torch

from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def remove_digit(num: int) -> int:
    """
    Return the largest number obtainable by deleting exactly one digit.

    >>> remove_digit(152)
    52
    >>> remove_digit(6385)
    685
    >>> remove_digit(-11)
    1
    """
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    else:
        num_str = str(abs(num))
        num_transpositions = [list(num_str) for char in range(len(num_str))]
        for index in range(len(num_str)):
            num_transpositions[index].pop(index)
        return max(int("".join(list(transposition))) for transposition in num_transpositions)


if __name__ == "__main__":
    __import__("doctest").testmod()
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys


# silence TensorFlow's console output if it happens to be imported transitively
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

print("Python version:", sys.version)
print("OS platform:", platform.platform())
print("OS architecture:", platform.machine())

try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
except ImportError:
    print("Torch version:", None)

try:
    import transformers

    print("transformers version:", transformers.__version__)
except ImportError:
    print("transformers version:", None)
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
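# Note: the CSS class above is tied to Yahoo Finance's markup and silently breaks
# whenever the page changes. A more defensive sketch (this selector is an assumption
# about the current page, not a stable API):
#
#   tag = soup.find("fin-streamer", attrs={"data-field": "regularMarketPrice"})
#   price = tag.text if tag is not None else "n/a"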
import re

import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey

from ..utils import logging


logger = logging.get_logger(__name__)


def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
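# A minimal usage sketch (the checkpoint filename and the Flax model instance are
# placeholders, not from this file; loading the state dict additionally requires torch):
#
#   pt_state_dict = torch.load("pytorch_model.bin", map_location="cpu")
#   flax_params = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)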
import io
import json
import unittest

from parameterized import parameterized

from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu


filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)


@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
import re
import string

import numpy as np

import datasets


_DESCRIPTION = """
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions: List of predicted texts.
    references: List of reference texts.
    regexes_to_ignore: List, defaults to None. Regex expressions of characters to
        ignore when calculating the exact matches. Note: these regexes are removed
        from the input data before the changes based on the options below (e.g. ignore_case,
        ignore_punctuation, ignore_numbers) are applied.
    ignore_case: Boolean, defaults to False. If true, turns everything
        to lowercase so that capitalization differences are ignored.
    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
        comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all digits before
        comparing predictions and references.
Returns:
    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds)
    >>> print(round(results["exact_match"], 1))
    25.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)
    >>> print(round(results["exact_match"], 1))
    50.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)
    >>> print(round(results["exact_match"], 1))
    75.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
    >>> print(round(results["exact_match"], 1))
    100.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It's like comparing oranges and apples."]
    >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It's like comparing apples and oranges."]
    >>> results = exact_match.compute(references=refs, predictions=preds)
    >>> print(round(results["exact_match"], 1))
    33.3
"""

_CITATION = """
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
__lowerCamelCase = {
"""return_dict""": False,
"""output_hidden_states""": True,
"""output_attentions""": True,
"""torchscript""": True,
"""torch_dtype""": """float16""",
"""use_bfloat16""": True,
"""tf_legacy_loss""": True,
"""pruned_heads""": {"""a""": 1},
"""tie_word_embeddings""": False,
"""is_decoder""": True,
"""cross_attention_hidden_size""": 1_28,
"""add_cross_attention""": True,
"""tie_encoder_decoder""": True,
"""max_length""": 50,
"""min_length""": 3,
"""do_sample""": True,
"""early_stopping""": True,
"""num_beams""": 3,
"""num_beam_groups""": 3,
"""diversity_penalty""": 0.5,
"""temperature""": 2.0,
"""top_k""": 10,
"""top_p""": 0.7,
"""typical_p""": 0.2,
"""repetition_penalty""": 0.8,
"""length_penalty""": 0.8,
"""no_repeat_ngram_size""": 5,
"""encoder_no_repeat_ngram_size""": 5,
"""bad_words_ids""": [1, 2, 3],
"""num_return_sequences""": 3,
"""chunk_size_feed_forward""": 5,
"""output_scores""": True,
"""return_dict_in_generate""": True,
"""forced_bos_token_id""": 2,
"""forced_eos_token_id""": 3,
"""remove_invalid_values""": True,
"""architectures""": ["""BertModel"""],
"""finetuning_task""": """translation""",
"""id2label""": {0: """label"""},
"""label2id""": {"""label""": """0"""},
"""tokenizer_class""": """BertTokenizerFast""",
"""prefix""": """prefix""",
"""bos_token_id""": 6,
"""pad_token_id""": 7,
"""eos_token_id""": 8,
"""sep_token_id""": 9,
"""decoder_start_token_id""": 10,
"""exponential_decay_length_penalty""": (5, 1.01),
"""suppress_tokens""": [0, 1],
"""begin_suppress_tokens""": 2,
"""task_specific_params""": {"""translation""": """some_params"""},
"""problem_type""": """regression""",
}
@is_staging_test
class UpperCAmelCase ( unittest.TestCase ):
@classmethod
def _SCREAMING_SNAKE_CASE (cls : Dict ) -> Optional[int]:
'''simple docstring'''
snake_case : Any = TOKEN
HfFolder.save_token(snake_case__ )
@classmethod
def _SCREAMING_SNAKE_CASE (cls : Dict ) -> Union[str, Any]:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id="test-config" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-config-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-config" )
except HTTPError:
pass
def _SCREAMING_SNAKE_CASE (self : str ) -> List[str]:
'''simple docstring'''
snake_case : Union[str, Any] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("test-config" , use_auth_token=self._token )
snake_case : Union[str, Any] = BertConfig.from_pretrained(f"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(snake_case__ , getattr(snake_case__ , snake_case__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-config" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(snake_case__ , repo_id="test-config" , push_to_hub=snake_case__ , use_auth_token=self._token )
snake_case : Any = BertConfig.from_pretrained(f"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(snake_case__ , getattr(snake_case__ , snake_case__ ) )
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Dict:
'''simple docstring'''
snake_case : List[Any] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("valid_org/test-config-org" , use_auth_token=self._token )
snake_case : Optional[int] = BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(snake_case__ , getattr(snake_case__ , snake_case__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-config-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
snake_case__ , repo_id="valid_org/test-config-org" , push_to_hub=snake_case__ , use_auth_token=self._token )
snake_case : str = BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(snake_case__ , getattr(snake_case__ , snake_case__ ) )
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Dict:
'''simple docstring'''
CustomConfig.register_for_auto_class()
snake_case : Union[str, Any] = CustomConfig(attribute=42 )
config.push_to_hub("test-dynamic-config" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {"AutoConfig": "custom_configuration.CustomConfig"} )
snake_case : int = AutoConfig.from_pretrained(f"""{USER}/test-dynamic-config""" , trust_remote_code=snake_case__ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , "CustomConfig" )
self.assertEqual(new_config.attribute , 42 )
class UpperCAmelCase ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Dict:
'''simple docstring'''
snake_case : Any = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
snake_case : Tuple = c.n_embd + 1 # int
snake_case : str = c.resid_pdrop + 1.0 # float
snake_case : Optional[Any] = not c.scale_attn_weights # bool
snake_case : Optional[int] = c.summary_type + "foo" # str
c.update_from_string(
f"""n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}""" )
self.assertEqual(snake_case__ , c.n_embd , "mismatch for key: n_embd" )
self.assertEqual(snake_case__ , c.resid_pdrop , "mismatch for key: resid_pdrop" )
self.assertEqual(snake_case__ , c.scale_attn_weights , "mismatch for key: scale_attn_weights" )
self.assertEqual(snake_case__ , c.summary_type , "mismatch for key: summary_type" )
def _SCREAMING_SNAKE_CASE (self : int ) -> List[str]:
'''simple docstring'''
snake_case : Tuple = PretrainedConfig()
snake_case : List[str] = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to addin config_common_kwargs above.
self.assertListEqual(
snake_case__ , ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"] )
snake_case : Dict = [key for key, value in config_common_kwargs.items() if value == getattr(snake_case__ , snake_case__ )]
if len(snake_case__ ) > 0:
raise ValueError(
"The following keys are set with the default values in"
" `test_configuration_common.config_common_kwargs` pick another value for them:"
f""" {', '.join(snake_case__ )}.""" )
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
with self.assertRaises(snake_case__ ):
# config is in subfolder, the following should not work without specifying the subfolder
snake_case : Optional[Any] = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" )
snake_case : Optional[Any] = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" , subfolder="bert" )
self.assertIsNotNone(snake_case__ )
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Optional[Any]:
'''simple docstring'''
snake_case : Tuple = mock.Mock()
snake_case : Optional[int] = 5_00
snake_case : Any = {}
snake_case : str = HTTPError
snake_case : Tuple = {}
# Download this model to make sure it's in the cache.
snake_case : List[Any] = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=snake_case__ ) as mock_head:
snake_case : List[str] = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
# This check we did call the fake head request
mock_head.assert_called()
def _SCREAMING_SNAKE_CASE (self : Any ) -> List[Any]:
'''simple docstring'''
snake_case : Dict = BertConfig.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json" )
def _SCREAMING_SNAKE_CASE (self : int ) -> str:
'''simple docstring'''
snake_case : Optional[Any] = AutoConfig.from_pretrained("bert-base-cased" )
snake_case : int = ["config.4.0.0.json"]
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(snake_case__ )
snake_case : str = 2
json.dump(configuration.to_dict() , open(os.path.join(snake_case__ , "config.4.0.0.json" ) , "w" ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
snake_case : str = AutoConfig.from_pretrained(snake_case__ )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
snake_case : List[str] = ["config.42.0.0.json"]
snake_case : Optional[int] = 7_68
configuration.save_pretrained(snake_case__ )
shutil.move(os.path.join(snake_case__ , "config.4.0.0.json" ) , os.path.join(snake_case__ , "config.42.0.0.json" ) )
snake_case : Union[str, Any] = AutoConfig.from_pretrained(snake_case__ )
self.assertEqual(new_configuration.hidden_size , 7_68 )
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Tuple:
'''simple docstring'''
snake_case : List[Any] = "hf-internal-testing/test-two-configs"
import transformers as new_transformers
snake_case : Optional[int] = "v4.0.0"
snake_case , snake_case : List[str] = new_transformers.models.auto.AutoConfig.from_pretrained(
snake_case__ , return_unused_kwargs=snake_case__ )
self.assertEqual(new_configuration.hidden_size , 2 )
# This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(snake_case__ , {} )
# Testing an older version by monkey-patching the version in the module where it's used.
import transformers as old_transformers
snake_case : int = "v3.0.0"
snake_case : int = old_transformers.models.auto.AutoConfig.from_pretrained(snake_case__ )
self.assertEqual(old_configuration.hidden_size , 7_68 )
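# A standalone sketch (illustrative only, not the transformers implementation) of
# the typed key=value parsing that update_from_string relies on in the first test
# above: each value is coerced to the type of the attribute it overrides, checking
# bool before int because bool is an int subclass.
def parse_config_overrides(spec, current):
    overrides = {}
    for pair in spec.split(","):
        key, value = pair.split("=")
        old = current[key]
        if isinstance(old, bool):
            overrides[key] = value.lower() in ("true", "1", "yes")
        elif isinstance(old, int):
            overrides[key] = int(value)
        elif isinstance(old, float):
            overrides[key] = float(value)
        else:
            overrides[key] = value
    return overrides

assert parse_config_overrides("n_embd=769,scale_attn_weights=False", {"n_embd": 768, "scale_attn_weights": True}) == {"n_embd": 769, "scale_attn_weights": False}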
| 10 | 1 |
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
__lowerCamelCase = transforms.Compose(
[
transforms.Resize((2_56, 2_56)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def UpperCamelCase ( __lowerCamelCase : str ):
if isinstance(__lowerCamelCase , torch.Tensor ):
return image
elif isinstance(__lowerCamelCase , PIL.Image.Image ):
snake_case : Any = [image]
snake_case : List[Any] = [trans(img.convert("RGB" ) ) for img in image]
snake_case : Union[str, Any] = torch.stack(__lowerCamelCase )
return image
class UpperCAmelCase ( A_ ):
def __init__(self : Union[str, Any] , snake_case__ : int , snake_case__ : Optional[int] ) -> Optional[int]:
'''simple docstring'''
super().__init__()
# make sure scheduler can always be converted to DDIM
snake_case : List[str] = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=snake_case__ , scheduler=snake_case__ )
def _SCREAMING_SNAKE_CASE (self : List[Any] , snake_case__ : Tuple ) -> List[Any]:
'''simple docstring'''
if strength < 0 or strength > 1:
raise ValueError(f"""The value of strength should in [0.0, 1.0] but is {strength}""" )
def _SCREAMING_SNAKE_CASE (self : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Optional[Any] ) -> Dict:
'''simple docstring'''
snake_case : str = min(int(num_inference_steps * strength ) , snake_case__ )
snake_case : Optional[int] = max(num_inference_steps - init_timestep , 0 )
snake_case : Any = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def _SCREAMING_SNAKE_CASE (self : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : Any , snake_case__ : Any , snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : int=None ) -> List[str]:
'''simple docstring'''
if not isinstance(snake_case__ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(snake_case__ )}""" )
snake_case : str = image.to(device=snake_case__ , dtype=snake_case__ )
if isinstance(snake_case__ , snake_case__ ) and len(snake_case__ ) != batch_size:
raise ValueError(
f"""You have passed a list of generators of length {len(snake_case__ )}, but requested an effective batch"""
f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
snake_case : Union[str, Any] = init_latents.shape
snake_case : Optional[int] = randn_tensor(snake_case__ , generator=snake_case__ , device=snake_case__ , dtype=snake_case__ )
# get latents
print("add noise to latents at timestep" , snake_case__ )
snake_case : Dict = self.scheduler.add_noise(snake_case__ , snake_case__ , snake_case__ )
snake_case : Tuple = init_latents
return latents
@torch.no_grad()
def __call__(self : str , snake_case__ : Union[torch.FloatTensor, PIL.Image.Image] = None , snake_case__ : float = 0.8 , snake_case__ : int = 1 , snake_case__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , snake_case__ : float = 0.0 , snake_case__ : int = 50 , snake_case__ : Optional[bool] = None , snake_case__ : Optional[str] = "pil" , snake_case__ : bool = True , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
self.check_inputs(snake_case__ )
# 2. Preprocess image
snake_case : Dict = preprocess(snake_case__ )
# 3. set timesteps
self.scheduler.set_timesteps(snake_case__ , device=self.device )
snake_case , snake_case : List[str] = self.get_timesteps(snake_case__ , snake_case__ , self.device )
snake_case : Optional[Any] = timesteps[:1].repeat(snake_case__ )
# 4. Prepare latent variables
snake_case : Any = self.prepare_latents(snake_case__ , snake_case__ , snake_case__ , self.unet.dtype , self.device , snake_case__ )
snake_case : Union[str, Any] = latents
# 5. Denoising loop
for t in self.progress_bar(snake_case__ ):
# 1. predict noise model_output
snake_case : Dict = self.unet(snake_case__ , snake_case__ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in the DDIM paper and should be in [0, 1]
# do x_t -> x_t-1
snake_case : Optional[int] = self.scheduler.step(
snake_case__ , snake_case__ , snake_case__ , eta=snake_case__ , use_clipped_model_output=snake_case__ , generator=snake_case__ , ).prev_sample
snake_case : Optional[int] = (image / 2 + 0.5).clamp(0 , 1 )
snake_case : str = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
snake_case : Union[str, Any] = self.numpy_to_pil(snake_case__ )
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=snake_case__ )
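# A standalone numeric sketch of the get_timesteps arithmetic used above: with
# num_inference_steps=50 and strength=0.8 the pipeline skips the first 10
# scheduler steps and denoises for the remaining 40, so a higher strength means
# more noise is added and more of the schedule is re-run.
num_inference_steps_demo, strength_demo = 50, 0.8
init_timestep_demo = min(int(num_inference_steps_demo * strength_demo), num_inference_steps_demo)
t_start_demo = max(num_inference_steps_demo - init_timestep_demo, 0)
assert (t_start_demo, num_inference_steps_demo - t_start_demo) == (10, 40)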
| 10 |
import os
import string
import sys
__lowerCamelCase = 1 << 8
__lowerCamelCase = {
"""tab""": ord("""\t"""),
"""newline""": ord("""\r"""),
"""esc""": 27,
"""up""": 65 + ARROW_KEY_FLAG,
"""down""": 66 + ARROW_KEY_FLAG,
"""right""": 67 + ARROW_KEY_FLAG,
"""left""": 68 + ARROW_KEY_FLAG,
"""mod_int""": 91,
"""undefined""": sys.maxsize,
"""interrupt""": 3,
"""insert""": 50,
"""delete""": 51,
"""pg_up""": 53,
"""pg_down""": 54,
}
__lowerCamelCase = KEYMAP["""up"""]
__lowerCamelCase = KEYMAP["""left"""]
if sys.platform == "win32":
__lowerCamelCase = []
__lowerCamelCase = {
B"""\xe0H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
B"""\x00H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
B"""\xe0P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
B"""\x00P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
B"""\xe0M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
B"""\x00M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
B"""\xe0K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
B"""\x00K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
}
for i in range(10):
__lowerCamelCase = ord(str(i))
def UpperCamelCase ( ):
if os.name == "nt":
import msvcrt
snake_case : str = "mbcs"
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
if len(__lowerCamelCase ) == 0:
# Read the keystroke
snake_case : Optional[int] = msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
snake_case : Any = ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
snake_case : int = chr(WIN_KEYMAP[cha] )
WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"] ) )
WIN_CH_BUFFER.append(__lowerCamelCase )
if ord(__lowerCamelCase ) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(126 ) )
snake_case : List[str] = chr(KEYMAP["esc"] )
except KeyError:
snake_case : Optional[Any] = cha[1]
else:
snake_case : Any = ch.decode(__lowerCamelCase )
else:
snake_case : Optional[Any] = WIN_CH_BUFFER.pop(0 )
elif os.name == "posix":
import termios
import tty
snake_case : Union[str, Any] = sys.stdin.fileno()
snake_case : Optional[Any] = termios.tcgetattr(__lowerCamelCase )
try:
tty.setraw(__lowerCamelCase )
snake_case : Union[str, Any] = sys.stdin.read(1 )
finally:
termios.tcsetattr(__lowerCamelCase , termios.TCSADRAIN , __lowerCamelCase )
return ch
def UpperCamelCase ( ):
snake_case : int = get_raw_chars()
if ord(__lowerCamelCase ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
elif ord(__lowerCamelCase ) == KEYMAP["esc"]:
snake_case : Dict = get_raw_chars()
if ord(__lowerCamelCase ) == KEYMAP["mod_int"]:
snake_case : Any = get_raw_chars()
if ord(__lowerCamelCase ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(__lowerCamelCase ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
return chr(ord(__lowerCamelCase ) + ARROW_KEY_FLAG )
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"]
| 10 | 1 |
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class UpperCAmelCase ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE (self : int ) -> str:
'''simple docstring'''
snake_case : str = tf.convert_to_tensor(
[
[
8.2220991, # 3rd highest value; idx. 0
-0.5620044,
5.23229752,
4.0386393,
-6.8798378,
-0.54785802,
-3.2012153,
2.92777176,
1.88171953,
7.35341276, # 5th highest value; idx. 9
8.43207833, # 2nd highest value; idx. 10
-9.85711836,
-5.96209236,
-1.13039161,
-7.1115294,
-0.8369633,
-5.3186408,
7.06427407,
0.81369344,
-0.82023817,
-5.9179796,
0.58813443,
-6.99778438,
4.71551189,
-0.18771637,
7.44020759, # 4th highest value; idx. 25
9.38450987, # 1st highest value; idx. 26
2.12662941,
-9.32562038,
2.35652522,
], # cumulative prob of 5 highest values <= 0.6
[
0.58425518,
4.53139238,
-5.57510464,
-6.28030699,
-7.19529503,
-4.02122551,
1.39337037,
-6.06707057,
1.59480517,
-9.643119,
0.03907799,
0.67231762,
-8.88206726,
6.27115922, # 4th highest value; idx. 13
2.28520723,
4.82767506,
4.30421368,
8.8275313, # 2nd highest value; idx. 17
5.44029958, # 5th highest value; idx. 18
-4.4735794,
7.38579536, # 3rd highest value; idx. 20
-2.91051663,
2.61946077,
-2.5674762,
-9.48959302,
-4.02922645,
-1.35416918,
9.67702323, # 1st highest value; idx. 27
-5.89478553,
1.85370467,
], # cumulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
snake_case : int = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non-filtered indices as noted above
snake_case : Optional[Any] = tf.convert_to_tensor(
[8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023] , dtype=tf.floataa , ) # expected non-filtered values as noted above
snake_case : Union[str, Any] = tf_top_k_top_p_filtering(snake_case__ , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 )
snake_case : Optional[Any] = output[output != -float("inf" )]
snake_case : Any = tf.cast(
tf.where(tf.not_equal(snake_case__ , tf.constant(-float("inf" ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
tf.debugging.assert_near(snake_case__ , snake_case__ , rtol=1e-12 )
tf.debugging.assert_equal(snake_case__ , snake_case__ )
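# A minimal NumPy sketch (an illustration, not the tested transformers
# implementation) of the top-k/top-p filtering idea the test above exercises:
# keep at most top_k tokens, shrink that set to the smallest nucleus whose
# probability mass reaches top_p, and never drop below min_tokens_to_keep.
def sketch_top_k_top_p(logits, top_k, top_p, min_tokens_to_keep=1):
    logits = np.asarray(logits, dtype=float)
    order = np.argsort(logits)[::-1]  # token indices, highest logit first
    probs = np.exp(logits - logits.max())
    probs /= probs.sum()
    cum = np.cumsum(probs[order])
    n_p = int(np.searchsorted(cum, top_p)) + 1  # smallest prefix covering top_p
    n_keep = max(min(top_k, n_p), min_tokens_to_keep)
    filtered = np.full_like(logits, -np.inf)
    filtered[order[:n_keep]] = logits[order[:n_keep]]
    return filtered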
@require_tf
class UpperCAmelCase ( unittest.TestCase ,A_ ):
# setting framework_dependent_parameters needs to be gated, just like its contents' imports
if is_tf_available():
A__ : str = {
"AutoModelForCausalLM": TFAutoModelForCausalLM,
"AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeqaSeq,
"AutoModelForSeq2SeqLM": TFAutoModelForSeqaSeqLM,
"AutoModelForVision2Seq": TFAutoModelForVisionaSeq,
"LogitsProcessorList": TFLogitsProcessorList,
"MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
"create_tensor_fn": tf.convert_to_tensor,
"floats_tensor": floats_tensor,
"return_tensors": "tf",
}
@slow
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> List[str]:
'''simple docstring'''
snake_case : List[Any] = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
snake_case : List[Any] = 2
snake_case : List[str] = 2
class UpperCAmelCase ( tf.Module ):
def __init__(self : Optional[Any] , snake_case__ : Tuple ) -> Dict:
'''simple docstring'''
super(snake_case__ , self ).__init__()
snake_case : str = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name="input_ids" ),
tf.TensorSpec((None, input_length) , tf.intaa , name="attention_mask" ),
) , jit_compile=snake_case__ , )
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] , snake_case__ : Tuple , snake_case__ : Dict ) -> Optional[int]:
'''simple docstring'''
snake_case : Optional[int] = self.model.generate(
input_ids=snake_case__ , attention_mask=snake_case__ , max_new_tokens=snake_case__ , return_dict_in_generate=snake_case__ , )
return {"sequences": outputs["sequences"]}
snake_case : List[Any] = [[2, 0], [1_02, 1_03]]
snake_case : Any = [[1, 0], [1, 1]]
snake_case : str = DummyModel(model=snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(snake_case__ , snake_case__ , signatures={"serving_default": dummy_model.serving} )
snake_case : List[str] = tf.saved_model.load(snake_case__ ).signatures["serving_default"]
for batch_size in range(1 , len(snake_case__ ) + 1 ):
snake_case : int = {
"input_ids": tf.constant(dummy_input_ids[:batch_size] ),
"attention_mask": tf.constant(dummy_attention_masks[:batch_size] ),
}
snake_case : Dict = serving_func(**snake_case__ )["sequences"]
snake_case : Optional[int] = test_model.generate(**snake_case__ , max_new_tokens=snake_case__ )
tf.debugging.assert_equal(snake_case__ , snake_case__ )
@slow
def _SCREAMING_SNAKE_CASE (self : Dict ) -> List[str]:
'''simple docstring'''
snake_case : str = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
snake_case : Tuple = 1
snake_case : Any = 2
class UpperCAmelCase ( tf.Module ):
def __init__(self : List[Any] , snake_case__ : Optional[int] ) -> Any:
'''simple docstring'''
super(snake_case__ , self ).__init__()
snake_case : Dict = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name="input_ids" ),
tf.TensorSpec((batch_size, None) , tf.intaa , name="attention_mask" ),
) , jit_compile=snake_case__ , )
def _SCREAMING_SNAKE_CASE (self : Any , snake_case__ : int , snake_case__ : List[str] ) -> int:
'''simple docstring'''
snake_case : Union[str, Any] = self.model.generate(
input_ids=snake_case__ , attention_mask=snake_case__ , max_new_tokens=snake_case__ , return_dict_in_generate=snake_case__ , )
return {"sequences": outputs["sequences"]}
snake_case : List[Any] = [[2], [1_02, 1_03]]
snake_case : Union[str, Any] = [[1], [1, 1]]
snake_case : Optional[Any] = DummyModel(model=snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(snake_case__ , snake_case__ , signatures={"serving_default": dummy_model.serving} )
snake_case : int = tf.saved_model.load(snake_case__ ).signatures["serving_default"]
for input_row in range(len(snake_case__ ) ):
snake_case : List[str] = {
"input_ids": tf.constant([dummy_input_ids[input_row]] ),
"attention_mask": tf.constant([dummy_attention_masks[input_row]] ),
}
snake_case : str = serving_func(**snake_case__ )["sequences"]
snake_case : Optional[int] = test_model.generate(**snake_case__ , max_new_tokens=snake_case__ )
tf.debugging.assert_equal(snake_case__ , snake_case__ )
@slow
@require_tensorflow_text
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id="google/flan-t5-small" , filename="spiece.model" , local_dir=snake_case__ )
class UpperCAmelCase ( tf.keras.layers.Layer ):
def __init__(self : str ) -> Any:
'''simple docstring'''
super().__init__()
snake_case : List[str] = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(snake_case__ , "spiece.model" ) , "rb" ).read() )
snake_case : Optional[int] = TFAutoModelForSeqaSeqLM.from_pretrained("hf-internal-testing/tiny-random-t5" )
def _SCREAMING_SNAKE_CASE (self : List[str] , snake_case__ : str , *snake_case__ : Any , **snake_case__ : List[str] ) -> Dict:
'''simple docstring'''
snake_case : str = self.tokenizer.tokenize(snake_case__ )
snake_case , snake_case : str = text.pad_model_inputs(
snake_case__ , max_seq_length=64 , pad_value=self.model.config.pad_token_id )
snake_case : str = self.model.generate(input_ids=snake_case__ , attention_mask=snake_case__ )
return self.tokenizer.detokenize(snake_case__ )
snake_case : Union[str, Any] = CompleteSentenceTransformer()
snake_case : Any = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name="inputs" )
snake_case : List[Any] = complete_model(snake_case__ )
snake_case : Optional[Any] = tf.keras.Model(snake_case__ , snake_case__ )
keras_model.save(snake_case__ )
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> int:
'''simple docstring'''
snake_case : int = {
"do_sample": True,
"num_beams": 1,
"top_p": 0.7,
"top_k": 10,
"temperature": 0.7,
}
snake_case : List[Any] = 14
snake_case : List[Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
snake_case : str = "Hello, my dog is cute and"
snake_case : Union[str, Any] = tokenizer(snake_case__ , return_tensors="tf" )
snake_case : List[Any] = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
snake_case : List[Any] = 6_38
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(":/CPU:0" ):
tf.random.set_seed(0 )
snake_case : int = model.generate(**snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
self.assertTrue(expectation == len(generated_tokens[0] ) )
snake_case : Tuple = [6_38, 1_98]
with tf.device(":/CPU:0" ):
tf.random.set_seed(0 )
snake_case : Union[str, Any] = model.generate(**snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> List[Any]:
'''simple docstring'''
snake_case : Dict = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart" )
snake_case : Tuple = "Hugging Face is a technology company based in New York and Paris."
snake_case : int = bart_tokenizer(snake_case__ , return_tensors="tf" ).input_ids
snake_case : Optional[int] = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart" )
snake_case : str = bart_model.generate(snake_case__ ).numpy()
class UpperCAmelCase ( A_ ):
def _SCREAMING_SNAKE_CASE (self : Dict , snake_case__ : Optional[Any] , snake_case__ : int=None , **snake_case__ : int ) -> Dict:
'''simple docstring'''
return super().call(snake_case__ , **snake_case__ )
snake_case : str = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart" )
snake_case : Tuple = bart_model.generate(snake_case__ , foo="bar" ).numpy()
self.assertTrue(np.array_equal(snake_case__ , snake_case__ ) )
class UpperCAmelCase ( bart_model.model.encoder.__class__ ):
def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : Tuple , **snake_case__ : Optional[Any] ) -> List[Any]:
'''simple docstring'''
return super().call(snake_case__ , **snake_case__ )
snake_case : Tuple = FakeEncoder(bart_model.config , bart_model.model.shared )
snake_case : Tuple = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
snake_case : Tuple = bart_model.generate(snake_case__ ).numpy()
with self.assertRaises(snake_case__ ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(snake_case__ , foo="bar" )
| 10 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
__lowerCamelCase = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ["""DPTFeatureExtractor"""]
__lowerCamelCase = ["""DPTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
"""DPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DPTForDepthEstimation""",
"""DPTForSemanticSegmentation""",
"""DPTModel""",
"""DPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
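# The module above uses the transformers lazy-import pattern: public names are
# declared in an import structure and the submodule is only imported on first
# attribute access. A minimal standalone sketch of the same idea (illustrative,
# not the actual _LazyModule implementation):
import importlib
import types

class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._name_to_submodule = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        submodule = importlib.import_module("." + self._name_to_submodule[attr], self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value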
| 10 | 1 |
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
__lowerCamelCase = Mapping[str, np.ndarray]
__lowerCamelCase = Mapping[str, Any] # Is a nested dict.
__lowerCamelCase = 0.01
@dataclasses.dataclass(frozen=A_ )
class UpperCAmelCase :
A__ : np.ndarray # [num_res, num_atom_type, 3]
# Amino-acid type for each residue represented as an integer between 0 and
# 20, where 20 is 'X'.
A__ : np.ndarray # [num_res]
# Binary float mask to indicate presence of a particular atom. 1.0 if an atom
# is present and 0.0 if not. This should be used for loss masking.
A__ : np.ndarray # [num_res, num_atom_type]
# Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
A__ : np.ndarray # [num_res]
# B-factors, or temperature factors, of each residue (in sq. angstroms units),
# representing the displacement of the residue from its ground truth mean
# value.
A__ : np.ndarray # [num_res, num_atom_type]
# Chain indices for multi-chain predictions
A__ : Optional[np.ndarray] = None
# Optional remark about the protein. Included as a comment in output PDB
# files
A__ : Optional[str] = None
# Templates used to generate this protein (prediction-only)
A__ : Optional[Sequence[str]] = None
# Chain corresponding to each parent
A__ : Optional[Sequence[int]] = None
def UpperCamelCase ( __lowerCamelCase : str ):
snake_case : Dict = r"(\[[A-Z]+\]\n)"
snake_case : List[str] = [tag.strip() for tag in re.split(__lowerCamelCase , __lowerCamelCase ) if len(__lowerCamelCase ) > 0]
snake_case : Iterator[Tuple[str, List[str]]] = zip(tags[0::2] , [l.split("\n" ) for l in tags[1::2]] )
snake_case : List[str] = ["N", "CA", "C"]
snake_case : str = None
snake_case : str = None
snake_case : Tuple = None
for g in groups:
if "[PRIMARY]" == g[0]:
snake_case : Tuple = g[1][0].strip()
for i in range(len(__lowerCamelCase ) ):
if seq[i] not in residue_constants.restypes:
snake_case : Optional[Any] = "X" # FIXME: strings are immutable
snake_case : Optional[int] = np.array(
[residue_constants.restype_order.get(__lowerCamelCase , residue_constants.restype_num ) for res_symbol in seq] )
elif "[TERTIARY]" == g[0]:
snake_case : List[List[float]] = []
for axis in range(3 ):
tertiary.append(list(map(__lowerCamelCase , g[1][axis].split() ) ) )
snake_case : Union[str, Any] = np.array(__lowerCamelCase )
snake_case : str = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa )
for i, atom in enumerate(__lowerCamelCase ):
snake_case : Dict = np.transpose(tertiary_np[:, i::3] )
atom_positions *= PICO_TO_ANGSTROM
elif "[MASK]" == g[0]:
snake_case : int = np.array(list(map({"-": 0, "+": 1}.get , g[1][0].strip() ) ) )
snake_case : List[str] = np.zeros(
(
len(__lowerCamelCase ),
residue_constants.atom_type_num,
) ).astype(np.floataa )
for i, atom in enumerate(__lowerCamelCase ):
snake_case : Any = 1
atom_mask *= mask[..., None]
assert aatype is not None
return Protein(
atom_positions=__lowerCamelCase , atom_mask=__lowerCamelCase , aatype=__lowerCamelCase , residue_index=np.arange(len(__lowerCamelCase ) ) , b_factors=__lowerCamelCase , )
def UpperCamelCase ( __lowerCamelCase : Protein , __lowerCamelCase : int = 0 ):
snake_case : List[str] = []
snake_case : str = prot.remark
if remark is not None:
pdb_headers.append(f"""REMARK {remark}""" )
snake_case : Union[str, Any] = prot.parents
snake_case : Dict = prot.parents_chain_index
if parents is not None and parents_chain_index is not None:
snake_case : Tuple = [p for i, p in zip(__lowerCamelCase , __lowerCamelCase ) if i == chain_id]
if parents is None or len(__lowerCamelCase ) == 0:
snake_case : int = ["N/A"]
pdb_headers.append(f"""PARENT {' '.join(__lowerCamelCase )}""" )
return pdb_headers
def UpperCamelCase ( __lowerCamelCase : Protein , __lowerCamelCase : str ):
snake_case : List[str] = []
snake_case : Any = pdb_str.split("\n" )
snake_case : int = prot.remark
if remark is not None:
out_pdb_lines.append(f"""REMARK {remark}""" )
snake_case : List[List[str]]
if prot.parents is not None and len(prot.parents ) > 0:
snake_case : Optional[Any] = []
if prot.parents_chain_index is not None:
snake_case : Dict[str, List[str]] = {}
for p, i in zip(prot.parents , prot.parents_chain_index ):
parent_dict.setdefault(str(__lowerCamelCase ) , [] )
parent_dict[str(__lowerCamelCase )].append(__lowerCamelCase )
snake_case : List[str] = max([int(__lowerCamelCase ) for chain_idx in parent_dict] )
for i in range(max_idx + 1 ):
snake_case : Optional[Any] = parent_dict.get(str(__lowerCamelCase ) , ["N/A"] )
parents_per_chain.append(__lowerCamelCase )
else:
parents_per_chain.append(list(prot.parents ) )
else:
snake_case : Optional[Any] = [["N/A"]]
def make_parent_line(__lowerCamelCase : Sequence[str] ) -> str:
return f"""PARENT {' '.join(__lowerCamelCase )}"""
out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
snake_case : List[Any] = 0
for i, l in enumerate(__lowerCamelCase ):
if "PARENT" not in l and "REMARK" not in l:
out_pdb_lines.append(__lowerCamelCase )
if "TER" in l and "END" not in lines[i + 1]:
chain_counter += 1
if not chain_counter >= len(__lowerCamelCase ):
snake_case : int = parents_per_chain[chain_counter]
else:
snake_case : Any = ["N/A"]
out_pdb_lines.append(make_parent_line(__lowerCamelCase ) )
return "\n".join(__lowerCamelCase )
def UpperCamelCase ( __lowerCamelCase : Protein ):
snake_case : str = residue_constants.restypes + ["X"]
def res_atoa(__lowerCamelCase : int ) -> str:
return residue_constants.restype_atoa.get(restypes[r] , "UNK" )
snake_case : List[Any] = residue_constants.atom_types
snake_case : List[str] = []
snake_case : Any = prot.atom_mask
snake_case : Any = prot.aatype
snake_case : Dict = prot.atom_positions
snake_case : List[str] = prot.residue_index.astype(np.intaa )
snake_case : Dict = prot.b_factors
snake_case : Tuple = prot.chain_index
if np.any(aatype > residue_constants.restype_num ):
raise ValueError("Invalid aatypes." )
snake_case : Any = get_pdb_headers(__lowerCamelCase )
if len(__lowerCamelCase ) > 0:
pdb_lines.extend(__lowerCamelCase )
snake_case : Dict = aatype.shape[0]
snake_case : Tuple = 1
snake_case : Any = 0
snake_case : Union[str, Any] = string.ascii_uppercase
snake_case : int = None
# Add all atom sites.
for i in range(__lowerCamelCase ):
snake_case : List[Any] = res_atoa(aatype[i] )
for atom_name, pos, mask, b_factor in zip(__lowerCamelCase , atom_positions[i] , atom_mask[i] , b_factors[i] ):
if mask < 0.5:
continue
snake_case : Any = "ATOM"
snake_case : str = atom_name if len(__lowerCamelCase ) == 4 else f""" {atom_name}"""
snake_case : Optional[Any] = ""
snake_case : Dict = ""
snake_case : Optional[Any] = 1.00
snake_case : str = atom_name[0] # Protein supports only C, N, O, S, this works.
snake_case : Dict = ""
snake_case : Any = "A"
if chain_index is not None:
snake_case : str = chain_tags[chain_index[i]]
# PDB is a columnar format, every space matters here!
snake_case : List[str] = (
f"""{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"""
f"""{res_name_a:>3} {chain_tag:>1}"""
f"""{residue_index[i]:>4}{insertion_code:>1} """
f"""{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"""
f"""{occupancy:>6.2f}{b_factor:>6.2f} """
f"""{element:>2}{charge:>2}"""
)
pdb_lines.append(__lowerCamelCase )
atom_index += 1
snake_case : Optional[int] = i == n - 1
if chain_index is not None:
if i != n - 1 and chain_index[i + 1] != prev_chain_index:
snake_case : Any = True
snake_case : Tuple = chain_index[i + 1]
if should_terminate:
# Close the chain.
snake_case : Optional[Any] = "TER"
snake_case : Optional[int] = (
f"""{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}"""
)
pdb_lines.append(__lowerCamelCase )
atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
pdb_lines.extend(get_pdb_headers(__lowerCamelCase , __lowerCamelCase ) )
pdb_lines.append("END" )
pdb_lines.append("" )
return "\n".join(__lowerCamelCase )
def UpperCamelCase ( __lowerCamelCase : Protein ):
return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def UpperCamelCase ( __lowerCamelCase : FeatureDict , __lowerCamelCase : ModelOutput , __lowerCamelCase : Optional[np.ndarray] = None , __lowerCamelCase : Optional[np.ndarray] = None , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : Optional[Sequence[str]] = None , __lowerCamelCase : Optional[Sequence[int]] = None , ):
return Protein(
aatype=features["aatype"] , atom_positions=result["final_atom_positions"] , atom_mask=result["final_atom_mask"] , residue_index=features["residue_index"] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"] ) , chain_index=__lowerCamelCase , remark=__lowerCamelCase , parents=__lowerCamelCase , parents_chain_index=__lowerCamelCase , )
| 10 |
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
__lowerCamelCase = logging.get_logger(__name__)
class UpperCAmelCase ( A_ ):
def __init__(self : List[Any] , *snake_case__ : List[str] , **snake_case__ : Dict ) -> None:
'''simple docstring'''
warnings.warn(
"The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use PerceiverImageProcessor instead." , snake_case__ , )
super().__init__(*snake_case__ , **snake_case__ )
| 10 | 1 |
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class UpperCAmelCase :
def _SCREAMING_SNAKE_CASE (self : Dict , snake_case__ : Any ) -> Dict:
'''simple docstring'''
raise NotImplementedError()
def _SCREAMING_SNAKE_CASE (self : int ) -> Union[str, Any]:
'''simple docstring'''
raise NotImplementedError()
class UpperCAmelCase ( A_ ):
def __init__(self : Tuple , snake_case__ : "AutoTokenizer" , snake_case__ : bool = False , **snake_case__ : List[Any] ) -> str:
'''simple docstring'''
snake_case : str = tokenizer
snake_case : List[Any] = skip_prompt
snake_case : List[str] = decode_kwargs
# variables used in the streaming process
snake_case : Tuple = []
snake_case : List[str] = 0
snake_case : Optional[int] = True
def _SCREAMING_SNAKE_CASE (self : Dict , snake_case__ : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
if len(value.shape ) > 1 and value.shape[0] > 1:
raise ValueError("TextStreamer only supports batch size 1" )
elif len(value.shape ) > 1:
snake_case : Dict = value[0]
if self.skip_prompt and self.next_tokens_are_prompt:
snake_case : str = False
return
# Add the new token to the cache and decode the entire thing.
self.token_cache.extend(value.tolist() )
snake_case : Union[str, Any] = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
# After the symbol for a new line, we flush the cache.
if text.endswith("\n" ):
snake_case : Dict = text[self.print_len :]
snake_case : Dict = []
snake_case : Optional[Any] = 0
# If the last token is a CJK character, we print the characters.
elif len(snake_case__ ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
snake_case : str = text[self.print_len :]
self.print_len += len(snake_case__ )
# Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
# which may change with the subsequent token -- there are probably smarter ways to do this!)
else:
snake_case : int = text[self.print_len : text.rfind(" " ) + 1]
self.print_len += len(snake_case__ )
self.on_finalized_text(snake_case__ )
def _SCREAMING_SNAKE_CASE (self : str ) -> List[Any]:
'''simple docstring'''
if len(self.token_cache ) > 0:
snake_case : Dict = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
snake_case : int = text[self.print_len :]
snake_case : Tuple = []
snake_case : Dict = 0
else:
snake_case : Tuple = ""
snake_case : List[str] = True
self.on_finalized_text(snake_case__ , stream_end=snake_case__ )
def _SCREAMING_SNAKE_CASE (self : List[str] , snake_case__ : str , snake_case__ : bool = False ) -> Tuple:
'''simple docstring'''
print(snake_case__ , flush=snake_case__ , end="" if not stream_end else None )
def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : Optional[int] ) -> Optional[int]:
'''simple docstring'''
if (
(cp >= 0X4_E_0_0 and cp <= 0X9_F_F_F)
or (cp >= 0X3_4_0_0 and cp <= 0X4_D_B_F) #
or (cp >= 0X2_0_0_0_0 and cp <= 0X2_A_6_D_F) #
or (cp >= 0X2_A_7_0_0 and cp <= 0X2_B_7_3_F) #
or (cp >= 0X2_B_7_4_0 and cp <= 0X2_B_8_1_F) #
or (cp >= 0X2_B_8_2_0 and cp <= 0X2_C_E_A_F) #
or (cp >= 0XF_9_0_0 and cp <= 0XF_A_F_F)
or (cp >= 0X2_F_8_0_0 and cp <= 0X2_F_A_1_F) #
): #
return True
return False
class UpperCAmelCase ( A_ ):
def __init__(self : List[Any] , snake_case__ : "AutoTokenizer" , snake_case__ : bool = False , snake_case__ : Optional[float] = None , **snake_case__ : List[str] ) -> List[Any]:
'''simple docstring'''
super().__init__(snake_case__ , snake_case__ , **snake_case__ )
snake_case : Tuple = Queue()
snake_case : Dict = None
snake_case : Dict = timeout
def _SCREAMING_SNAKE_CASE (self : Any , snake_case__ : str , snake_case__ : bool = False ) -> List[str]:
'''simple docstring'''
self.text_queue.put(snake_case__ , timeout=self.timeout )
if stream_end:
self.text_queue.put(self.stop_signal , timeout=self.timeout )
def __iter__(self : Optional[int] ) -> Tuple:
'''simple docstring'''
return self
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> int:
'''simple docstring'''
snake_case : Any = self.text_queue.get(timeout=self.timeout )
if value == self.stop_signal:
raise StopIteration()
else:
return value
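# A minimal usage sketch of the iterator streamer above, assuming the classes
# correspond to the upstream transformers TextStreamer / TextIteratorStreamer
# and using a placeholder checkpoint:
#
# from threading import Thread
# from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
#
# tokenizer = AutoTokenizer.from_pretrained("gpt2")
# model = AutoModelForCausalLM.from_pretrained("gpt2")
# inputs = tokenizer(["An increasing sequence: one,"], return_tensors="pt")
# streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
# Thread(target=model.generate, kwargs={**inputs, "streamer": streamer, "max_new_tokens": 20}).start()
# for new_text in streamer:
#     print(new_text, end="")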
| 10 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__lowerCamelCase = {
"""configuration_pix2struct""": [
"""PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Pix2StructConfig""",
"""Pix2StructTextConfig""",
"""Pix2StructVisionConfig""",
],
"""processing_pix2struct""": ["""Pix2StructProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ["""Pix2StructImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
"""PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Pix2StructPreTrainedModel""",
"""Pix2StructForConditionalGeneration""",
"""Pix2StructVisionModel""",
"""Pix2StructTextModel""",
]
if TYPE_CHECKING:
from .configuration_pixastruct import (
PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
PixaStructConfig,
PixaStructTextConfig,
PixaStructVisionConfig,
)
from .processing_pixastruct import PixaStructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_pixastruct import PixaStructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pixastruct import (
PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
PixaStructForConditionalGeneration,
PixaStructPreTrainedModel,
PixaStructTextModel,
PixaStructVisionModel,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 10 | 1 |
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class UpperCAmelCase ( A_ ):
def __init__(self : Dict , *snake_case__ : Any , snake_case__ : str=None , snake_case__ : Union[str, Any]=None , **snake_case__ : Optional[Any] ) -> List[str]:
'''simple docstring'''
super().__init__(*snake_case__ , **snake_case__ )
snake_case : Any = eval_examples
snake_case : Any = post_process_function
def _SCREAMING_SNAKE_CASE (self : Optional[Any] , snake_case__ : Tuple=None , snake_case__ : List[str]=None , snake_case__ : Optional[int]=None , snake_case__ : str = "eval" ) -> Dict:
'''simple docstring'''
snake_case : Union[str, Any] = self.eval_dataset if eval_dataset is None else eval_dataset
snake_case : List[Any] = self.get_eval_dataloader(snake_case__ )
snake_case : Any = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
snake_case : Dict = self.compute_metrics
snake_case : str = None
snake_case : List[str] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
snake_case : List[Any] = time.time()
try:
snake_case : str = eval_loop(
snake_case__ , description="Evaluation" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=snake_case__ , metric_key_prefix=snake_case__ , )
finally:
snake_case : Optional[Any] = compute_metrics
snake_case : List[str] = self.args.eval_batch_size * self.args.world_size
if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
snake_case__ , snake_case__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node writes the results by default
snake_case : Any = self.post_process_function(snake_case__ , snake_case__ , output.predictions )
snake_case : List[str] = self.compute_metrics(snake_case__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
snake_case : Any = metrics.pop(snake_case__ )
metrics.update(output.metrics )
else:
snake_case : Any = output.metrics
if self.args.should_log:
# Only the main node logs the results by default
self.log(snake_case__ )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
snake_case : Dict = self.callback_handler.on_evaluate(self.args , self.state , self.control , snake_case__ )
return metrics
def _SCREAMING_SNAKE_CASE (self : int , snake_case__ : List[str] , snake_case__ : int , snake_case__ : Optional[int]=None , snake_case__ : str = "test" ) -> str:
'''simple docstring'''
snake_case : Tuple = self.get_test_dataloader(snake_case__ )
# Temporarily disable metric computation, we will do it in the loop here.
snake_case : List[Any] = self.compute_metrics
snake_case : int = None
snake_case : Optional[int] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
snake_case : List[str] = time.time()
try:
snake_case : List[Any] = eval_loop(
snake_case__ , description="Prediction" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=snake_case__ , metric_key_prefix=snake_case__ , )
finally:
snake_case : Tuple = compute_metrics
snake_case : List[Any] = self.args.eval_batch_size * self.args.world_size
if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
snake_case__ , snake_case__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
snake_case : Union[str, Any] = self.post_process_function(snake_case__ , snake_case__ , output.predictions , "predict" )
snake_case : Tuple = self.compute_metrics(snake_case__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
snake_case : int = metrics.pop(snake_case__ )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=snake_case__ )
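# A standalone sketch of the metric-prefixing step both methods above perform,
# so that e.g. "exact_match" is reported as "eval_exact_match":
def prefix_metrics_sketch(metrics, prefix):
    return {key if key.startswith(f"{prefix}_") else f"{prefix}_{key}": value for key, value in metrics.items()}

assert prefix_metrics_sketch({"f1": 88.0, "eval_loss": 0.4}, "eval") == {"eval_f1": 88.0, "eval_loss": 0.4}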
| 10 |
def UpperCamelCase ( __lowerCamelCase : str ):
snake_case : Union[str, Any] = 0
# if input_string is "aba" than new_input_string become "a|b|a"
snake_case : Tuple = ""
snake_case : Optional[int] = ""
# append each character + "|" in new_string for range(0, length-1)
for i in input_string[: len(__lowerCamelCase ) - 1]:
new_input_string += i + "|"
# append last character
new_input_string += input_string[-1]
# we will store the start and end of the previously found palindromic
# substring that ends furthest to the right
snake_case , snake_case : Tuple = 0, 0
# length[i] shows the length of the palindromic substring centered at i
snake_case : Any = [1 for i in range(len(__lowerCamelCase ) )]
# for each character in new_input_string, find the corresponding palindromic string
snake_case : int = 0
for j in range(len(__lowerCamelCase ) ):
snake_case : Optional[Any] = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
while (
j - k >= 0
and j + k < len(__lowerCamelCase )
and new_input_string[k + j] == new_input_string[j - k]
):
k += 1
snake_case : str = 2 * k - 1
# does this palindrome end after the previously explored end (that is, r)?
# if yes, update r to the last index of this palindrome
if j + k - 1 > r:
snake_case : List[str] = j - k + 1 # noqa: E741
snake_case : Dict = j + k - 1
# update max_length and start position
if max_length < length[j]:
snake_case : Optional[Any] = length[j]
snake_case : int = j
# create that string
snake_case : Any = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
for i in s:
if i != "|":
output_string += i
return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
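# A worked trace of the transformation above (standalone, illustrative): for
# input "abbba" the interleaved string is "a|b|b|b|a"; expanding around its
# centre index 4 matches the whole transformed string (length 9), which maps
# back to the original palindrome "abbba" once the "|" separators are dropped.
demo_input = "abbba"
demo_transformed = "|".join(demo_input)  # same effect as the append loop above
assert demo_transformed == "a|b|b|b|a"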
| 10 | 1 |
import argparse
import copy
def UpperCamelCase ( __lowerCamelCase : List[Any] ):
snake_case : Tuple = {}
with open(__lowerCamelCase ) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
snake_case : int = []
_list.append([line.split()[1], line.split()[2]] )
snake_case : Optional[Any] = _list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]] )
if line.split()[1] not in dict_of_neighbours:
snake_case : Any = []
_list.append([line.split()[0], line.split()[2]] )
snake_case : Tuple = _list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]] )
return dict_of_neighbours
def UpperCamelCase ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] ):
with open(__lowerCamelCase ) as f:
snake_case : Dict = f.read(1 )
snake_case : Union[str, Any] = start_node
snake_case : Dict = []
snake_case : int = start_node
snake_case : Optional[int] = 0
while visiting not in first_solution:
snake_case : Dict = 10000
for k in dict_of_neighbours[visiting]:
if int(k[1] ) < int(__lowerCamelCase ) and k[0] not in first_solution:
snake_case : Optional[int] = k[1]
snake_case : str = k[0]
first_solution.append(__lowerCamelCase )
snake_case : List[str] = distance_of_first_solution + int(__lowerCamelCase )
snake_case : Any = best_node
first_solution.append(__lowerCamelCase )
snake_case : Any = 0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
snake_case : str = (
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1] )
- 10000
)
return first_solution, distance_of_first_solution
def UpperCamelCase ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int ):
snake_case : Optional[Any] = []
for n in solution[1:-1]:
snake_case : List[Any] = solution.index(__lowerCamelCase )
for kn in solution[1:-1]:
snake_case : Optional[int] = solution.index(__lowerCamelCase )
if n == kn:
continue
snake_case : str = copy.deepcopy(__lowerCamelCase )
snake_case : List[str] = kn
snake_case : List[Any] = n
snake_case : Tuple = 0
for k in _tmp[:-1]:
snake_case : Optional[int] = _tmp[_tmp.index(__lowerCamelCase ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
snake_case : Union[str, Any] = distance + int(i[1] )
_tmp.append(__lowerCamelCase )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
snake_case : Optional[Any] = len(neighborhood_of_solution[0] ) - 1
neighborhood_of_solution.sort(key=lambda __lowerCamelCase : x[index_of_last_item_in_the_list] )
return neighborhood_of_solution
def UpperCamelCase ( __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] ):
snake_case : str = 1
snake_case : Dict = first_solution
snake_case : Dict = []
snake_case : List[str] = distance_of_first_solution
snake_case : int = solution
while count <= iters:
snake_case : Tuple = find_neighborhood(__lowerCamelCase , __lowerCamelCase )
snake_case : List[str] = 0
snake_case : Tuple = neighborhood[index_of_best_solution]
snake_case : List[Any] = len(__lowerCamelCase ) - 1
snake_case : List[Any] = False
while not found:
snake_case : int = 0
while i < len(__lowerCamelCase ):
if best_solution[i] != solution[i]:
snake_case : Optional[Any] = best_solution[i]
snake_case : Optional[Any] = solution[i]
break
snake_case : int = i + 1
if [first_exchange_node, second_exchange_node] not in tabu_list and [
second_exchange_node,
first_exchange_node,
] not in tabu_list:
tabu_list.append([first_exchange_node, second_exchange_node] )
snake_case : Optional[int] = True
snake_case : Union[str, Any] = best_solution[:-1]
snake_case : Union[str, Any] = neighborhood[index_of_best_solution][best_cost_index]
if cost < best_cost:
snake_case : Optional[Any] = cost
snake_case : Optional[Any] = solution
else:
snake_case : Optional[Any] = index_of_best_solution + 1
snake_case : Tuple = neighborhood[index_of_best_solution]
if len(__lowerCamelCase ) >= size:
tabu_list.pop(0 )
snake_case : List[Any] = count + 1
return best_solution_ever, best_cost
def UpperCamelCase ( __lowerCamelCase : Union[str, Any]=None ):
snake_case : List[str] = generate_neighbours(args.File )
snake_case , snake_case : List[str] = generate_first_solution(
args.File , __lowerCamelCase )
snake_case , snake_case : str = tabu_search(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , args.Iterations , args.Size , )
print(f"""Best solution: {best_sol}, with total distance: {best_cost}.""" )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser(description="""Tabu Search""")
parser.add_argument(
"""-f""",
"""--File""",
type=str,
help="""Path to the file containing the data""",
required=True,
)
parser.add_argument(
"""-i""",
"""--Iterations""",
type=int,
help="""How many iterations the algorithm should perform""",
required=True,
)
parser.add_argument(
"""-s""", """--Size""", type=int, help="""Size of the tabu list""", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
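# Expected input format, inferred from generate_neighbours above: one edge per
# line as "<node_a> <node_b> <distance>", with the start node taken from the
# first character of the file. A hypothetical tsp_data.txt and invocation
# (file name and parameter values are placeholders):
#
#   a b 20
#   a c 18
#   b c 10
#
#   python tabu_search.py -f tsp_data.txt -i 100 -s 5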
| 10 |
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
__lowerCamelCase = Mapping[str, np.ndarray]
__lowerCamelCase = Mapping[str, Any] # Is a nested dict.
__lowerCamelCase = 0.01
@dataclasses.dataclass(frozen=A_ )
class UpperCAmelCase :
A__ : np.ndarray # [num_res, num_atom_type, 3]
# Amino-acid type for each residue represented as an integer between 0 and
# 20, where 20 is 'X'.
A__ : np.ndarray # [num_res]
# Binary float mask to indicate presence of a particular atom. 1.0 if an atom
# is present and 0.0 if not. This should be used for loss masking.
A__ : np.ndarray # [num_res, num_atom_type]
# Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
A__ : np.ndarray # [num_res]
# B-factors, or temperature factors, of each residue (in sq. angstroms units),
# representing the displacement of the residue from its ground truth mean
# value.
A__ : np.ndarray # [num_res, num_atom_type]
# Chain indices for multi-chain predictions
A__ : Optional[np.ndarray] = None
# Optional remark about the protein. Included as a comment in output PDB
# files
A__ : Optional[str] = None
# Templates used to generate this protein (prediction-only)
A__ : Optional[Sequence[str]] = None
# Chain corresponding to each parent
A__ : Optional[Sequence[int]] = None
def UpperCamelCase ( __lowerCamelCase : str ):
snake_case : Dict = r"(\[[A-Z]+\]\n)"
snake_case : List[str] = [tag.strip() for tag in re.split(__lowerCamelCase , __lowerCamelCase ) if len(__lowerCamelCase ) > 0]
snake_case : Iterator[Tuple[str, List[str]]] = zip(tags[0::2] , [l.split("\n" ) for l in tags[1::2]] )
snake_case : List[str] = ["N", "CA", "C"]
snake_case : str = None
snake_case : str = None
snake_case : Tuple = None
for g in groups:
if "[PRIMARY]" == g[0]:
snake_case : Tuple = g[1][0].strip()
for i in range(len(__lowerCamelCase ) ):
if seq[i] not in residue_constants.restypes:
snake_case : Optional[Any] = "X" # FIXME: strings are immutable
snake_case : Optional[int] = np.array(
[residue_constants.restype_order.get(__lowerCamelCase , residue_constants.restype_num ) for res_symbol in seq] )
elif "[TERTIARY]" == g[0]:
snake_case : List[List[float]] = []
for axis in range(3 ):
tertiary.append(list(map(__lowerCamelCase , g[1][axis].split() ) ) )
snake_case : Union[str, Any] = np.array(__lowerCamelCase )
snake_case : str = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa )
for i, atom in enumerate(__lowerCamelCase ):
snake_case : Dict = np.transpose(tertiary_np[:, i::3] )
atom_positions *= PICO_TO_ANGSTROM
elif "[MASK]" == g[0]:
snake_case : int = np.array(list(map({"-": 0, "+": 1}.get , g[1][0].strip() ) ) )
snake_case : List[str] = np.zeros(
(
len(__lowerCamelCase ),
residue_constants.atom_type_num,
) ).astype(np.floataa )
for i, atom in enumerate(__lowerCamelCase ):
snake_case : Any = 1
atom_mask *= mask[..., None]
assert aatype is not None
return Protein(
atom_positions=__lowerCamelCase , atom_mask=__lowerCamelCase , aatype=__lowerCamelCase , residue_index=np.arange(len(__lowerCamelCase ) ) , b_factors=__lowerCamelCase , )
def UpperCamelCase ( __lowerCamelCase : Protein , __lowerCamelCase : int = 0 ):
snake_case : List[str] = []
snake_case : str = prot.remark
if remark is not None:
pdb_headers.append(f"""REMARK {remark}""" )
snake_case : Union[str, Any] = prot.parents
snake_case : Dict = prot.parents_chain_index
if parents is not None and parents_chain_index is not None:
snake_case : Tuple = [p for i, p in zip(__lowerCamelCase , __lowerCamelCase ) if i == chain_id]
if parents is None or len(__lowerCamelCase ) == 0:
snake_case : int = ["N/A"]
pdb_headers.append(f"""PARENT {' '.join(__lowerCamelCase )}""" )
return pdb_headers
def UpperCamelCase ( __lowerCamelCase : Protein , __lowerCamelCase : str ):
snake_case : List[str] = []
snake_case : Any = pdb_str.split("\n" )
snake_case : int = prot.remark
if remark is not None:
out_pdb_lines.append(f"""REMARK {remark}""" )
snake_case : List[List[str]]
if prot.parents is not None and len(prot.parents ) > 0:
snake_case : Optional[Any] = []
if prot.parents_chain_index is not None:
snake_case : Dict[str, List[str]] = {}
for p, i in zip(prot.parents , prot.parents_chain_index ):
parent_dict.setdefault(str(__lowerCamelCase ) , [] )
parent_dict[str(__lowerCamelCase )].append(__lowerCamelCase )
snake_case : List[str] = max([int(__lowerCamelCase ) for chain_idx in parent_dict] )
for i in range(max_idx + 1 ):
snake_case : Optional[Any] = parent_dict.get(str(__lowerCamelCase ) , ["N/A"] )
parents_per_chain.append(__lowerCamelCase )
else:
parents_per_chain.append(list(prot.parents ) )
else:
snake_case : Optional[Any] = [["N/A"]]
def make_parent_line(__lowerCamelCase : Sequence[str] ) -> str:
return f"""PARENT {' '.join(__lowerCamelCase )}"""
out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
snake_case : List[Any] = 0
for i, l in enumerate(__lowerCamelCase ):
if "PARENT" not in l and "REMARK" not in l:
out_pdb_lines.append(__lowerCamelCase )
if "TER" in l and "END" not in lines[i + 1]:
chain_counter += 1
if not chain_counter >= len(__lowerCamelCase ):
snake_case : int = parents_per_chain[chain_counter]
else:
snake_case : Any = ["N/A"]
out_pdb_lines.append(make_parent_line(__lowerCamelCase ) )
return "\n".join(__lowerCamelCase )
def UpperCamelCase ( __lowerCamelCase : Protein ):
snake_case : str = residue_constants.restypes + ["X"]
def res_atoa(__lowerCamelCase : int ) -> str:
return residue_constants.restype_atoa.get(restypes[r] , "UNK" )
snake_case : List[Any] = residue_constants.atom_types
snake_case : List[str] = []
snake_case : Any = prot.atom_mask
snake_case : Any = prot.aatype
snake_case : Dict = prot.atom_positions
snake_case : List[str] = prot.residue_index.astype(np.intaa )
snake_case : Dict = prot.b_factors
snake_case : Tuple = prot.chain_index
if np.any(aatype > residue_constants.restype_num ):
raise ValueError("Invalid aatypes." )
snake_case : Any = get_pdb_headers(__lowerCamelCase )
if len(__lowerCamelCase ) > 0:
pdb_lines.extend(__lowerCamelCase )
snake_case : Dict = aatype.shape[0]
snake_case : Tuple = 1
snake_case : Any = 0
snake_case : Union[str, Any] = string.ascii_uppercase
snake_case : int = None
# Add all atom sites.
for i in range(__lowerCamelCase ):
snake_case : List[Any] = res_atoa(aatype[i] )
for atom_name, pos, mask, b_factor in zip(__lowerCamelCase , atom_positions[i] , atom_mask[i] , b_factors[i] ):
if mask < 0.5:
continue
snake_case : Any = "ATOM"
snake_case : str = atom_name if len(__lowerCamelCase ) == 4 else f""" {atom_name}"""
snake_case : Optional[Any] = ""
snake_case : Dict = ""
snake_case : Optional[Any] = 1.00
snake_case : str = atom_name[0] # Protein supports only C, N, O, S, this works.
snake_case : Dict = ""
snake_case : Any = "A"
if chain_index is not None:
snake_case : str = chain_tags[chain_index[i]]
# PDB is a columnar format, every space matters here!
snake_case : List[str] = (
f"""{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"""
f"""{res_name_a:>3} {chain_tag:>1}"""
f"""{residue_index[i]:>4}{insertion_code:>1} """
f"""{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"""
f"""{occupancy:>6.2f}{b_factor:>6.2f} """
f"""{element:>2}{charge:>2}"""
)
pdb_lines.append(__lowerCamelCase )
atom_index += 1
snake_case : Optional[int] = i == n - 1
if chain_index is not None:
if i != n - 1 and chain_index[i + 1] != prev_chain_index:
snake_case : Any = True
snake_case : Tuple = chain_index[i + 1]
if should_terminate:
# Close the chain.
snake_case : Optional[Any] = "TER"
snake_case : Optional[int] = (
f"""{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}"""
)
pdb_lines.append(__lowerCamelCase )
atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
pdb_lines.extend(get_pdb_headers(__lowerCamelCase , __lowerCamelCase ) )
pdb_lines.append("END" )
pdb_lines.append("" )
return "\n".join(__lowerCamelCase )
def UpperCamelCase ( __lowerCamelCase : Protein ):
return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def UpperCamelCase ( __lowerCamelCase : FeatureDict , __lowerCamelCase : ModelOutput , __lowerCamelCase : Optional[np.ndarray] = None , __lowerCamelCase : Optional[np.ndarray] = None , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : Optional[Sequence[str]] = None , __lowerCamelCase : Optional[Sequence[int]] = None , ):
return Protein(
aatype=features["aatype"] , atom_positions=result["final_atom_positions"] , atom_mask=result["final_atom_mask"] , residue_index=features["residue_index"] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"] ) , chain_index=__lowerCamelCase , remark=__lowerCamelCase , parents=__lowerCamelCase , parents_chain_index=__lowerCamelCase , )
| 10 | 1 |
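# The to_pdb routine above depends on PDB's fixed-width column layout. Below is a
# minimal standalone sketch of that layout for a single ATOM record; the field
# values are hypothetical and this is not the library's own API.
def format_atom_line(serial: int, name: str, res_name: str, chain: str, res_seq: int, x: float, y: float, z: float, occupancy: float = 1.00, b_factor: float = 0.00, element: str = "C") -> str:
    # Atom names shorter than four characters are shifted right by one column.
    padded_name = name if len(name) == 4 else f" {name}"
    return (
        f"{'ATOM':<6}{serial:>5} {padded_name:<4}{'':>1}"
        f"{res_name:>3} {chain:>1}{res_seq:>4}{'':>1}   "
        f"{x:>8.3f}{y:>8.3f}{z:>8.3f}"
        f"{occupancy:>6.2f}{b_factor:>6.2f}          "
        f"{element:>2}"
    )
print(format_atom_line(1, "CA", "ALA", "A", 1, 11.104, 6.134, -6.504))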
import numpy as np
from PIL import Image
def UpperCamelCase ( __lowerCamelCase : np.ndarray , __lowerCamelCase : int , __lowerCamelCase : int ):
snake_case : str = np.array(__lowerCamelCase )
if arr.shape[0] != arr.shape[1]:
raise ValueError("The input array is not a square matrix" )
snake_case : Optional[int] = 0
snake_case : Tuple = 0
snake_case : Union[str, Any] = 0
snake_case : List[Any] = 0
# compute the shape of the output matrix
snake_case : Optional[int] = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape maxpool_shape
snake_case : Union[str, Any] = np.zeros((maxpool_shape, maxpool_shape) )
while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the maximum of the pooling matrix
snake_case : List[Any] = np.max(arr[i : i + size, j : j + size] )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
snake_case : List[str] = 0
snake_case : List[Any] = 0
return updated_arr
def UpperCamelCase ( __lowerCamelCase : np.ndarray , __lowerCamelCase : int , __lowerCamelCase : int ):
snake_case : Union[str, Any] = np.array(__lowerCamelCase )
if arr.shape[0] != arr.shape[1]:
raise ValueError("The input array is not a square matrix" )
snake_case : Union[str, Any] = 0
snake_case : Optional[int] = 0
snake_case : Optional[int] = 0
snake_case : List[str] = 0
# compute the shape of the output matrix
snake_case : List[Any] = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape avgpool_shape
snake_case : str = np.zeros((avgpool_shape, avgpool_shape) )
while i < arr.shape[0]:
# if the end of the matrix is reached, break
if i + size > arr.shape[0]:
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the average of the pooling matrix
snake_case : Dict = int(np.average(arr[i : i + size, j : j + size] ) )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
snake_case : Dict = 0
snake_case : Any = 0
return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name="""avgpooling""", verbose=True)
# Loading the image
__lowerCamelCase = Image.open("""path_to_image""")
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 10 |
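# The explicit sliding-window loops above can be vectorized. A minimal sketch of
# max pooling with NumPy stride tricks; it assumes NumPy >= 1.20 for
# sliding_window_view and is an alternative, not the snippet's own helper.
import numpy as np
from numpy.lib.stride_tricks import sliding_window_view
def maxpool2d(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    # Build every size x size window, keep every stride-th window along both
    # axes, then reduce each window with max.
    windows = sliding_window_view(arr, (size, size))[::stride, ::stride]
    return windows.max(axis=(-2, -1))
demo = np.arange(16, dtype=float).reshape(4, 4)
print(maxpool2d(demo, size=2, stride=2))  # [[ 5.  7.] [13. 15.]]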
from __future__ import annotations
__lowerCamelCase = {
"""A""": ["""B""", """C""", """E"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F""", """G"""],
"""D""": ["""B"""],
"""E""": ["""A""", """B""", """D"""],
"""F""": ["""C"""],
"""G""": ["""C"""],
}
class UpperCAmelCase :
def __init__(self : Tuple , snake_case__ : dict[str, list[str]] , snake_case__ : str ) -> None:
'''simple docstring'''
snake_case : str = graph
# mapping node to its parent in resulting breadth first tree
snake_case : dict[str, str | None] = {}
snake_case : Union[str, Any] = source_vertex
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> None:
'''simple docstring'''
snake_case : Any = {self.source_vertex}
snake_case : str = None
snake_case : List[str] = [self.source_vertex] # first in first out queue
while queue:
snake_case : List[Any] = queue.pop(0 )
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(snake_case__ )
snake_case : Any = vertex
queue.append(snake_case__ )
def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : str ) -> str:
'''simple docstring'''
if target_vertex == self.source_vertex:
return self.source_vertex
snake_case : str = self.parent.get(snake_case__ )
if target_vertex_parent is None:
snake_case : Optional[Any] = (
f"""No path from vertex: {self.source_vertex} to vertex: {target_vertex}"""
)
raise ValueError(snake_case__ )
return self.shortest_path(snake_case__ ) + f"""->{target_vertex}"""
if __name__ == "__main__":
__lowerCamelCase = Graph(graph, """G""")
g.breath_first_search()
print(g.shortest_path("""D"""))
print(g.shortest_path("""G"""))
print(g.shortest_path("""Foo"""))
| 10 | 1 |
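# The breadth-first search above dequeues with queue.pop(0), which is O(n) per
# operation on a Python list. A minimal sketch of the same traversal with
# collections.deque (O(1) popleft) and iterative path reconstruction:
from collections import deque
def bfs_shortest_path(graph: dict, source: str, target: str) -> list:
    parent = {source: None}
    queue = deque([source])
    while queue:
        vertex = queue.popleft()
        for adjacent in graph[vertex]:
            if adjacent not in parent:
                parent[adjacent] = vertex
                queue.append(adjacent)
    if target not in parent:
        raise ValueError(f"No path from vertex: {source} to vertex: {target}")
    path = []
    while target is not None:
        path.append(target)
        target = parent[target]
    return path[::-1]
demo_graph = {"A": ["B", "C"], "B": ["A", "D"], "C": ["A"], "D": ["B"]}
print(bfs_shortest_path(demo_graph, "A", "D"))  # ['A', 'B', 'D']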
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
"""google/pix2struct-textcaps-base""": (
"""https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"""
),
}
class UpperCAmelCase ( A_ ):
A__ : List[str] = "pix2struct_text_model"
A__ : Optional[Any] = ["past_key_values"]
A__ : Optional[int] = {
"hidden_size": "hidden_size",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__(self : Tuple , snake_case__ : Tuple=5_02_44 , snake_case__ : Optional[Any]=7_68 , snake_case__ : str=64 , snake_case__ : Optional[int]=20_48 , snake_case__ : Dict=12 , snake_case__ : Tuple=12 , snake_case__ : Dict=32 , snake_case__ : List[str]=1_28 , snake_case__ : Any=0.1 , snake_case__ : Optional[Any]=1e-6 , snake_case__ : Dict=1.0 , snake_case__ : Dict="gelu_new" , snake_case__ : Tuple=0 , snake_case__ : Optional[int]=False , snake_case__ : str=0 , snake_case__ : List[Any]=1 , snake_case__ : Union[str, Any]=False , snake_case__ : Optional[int]=True , **snake_case__ : Any , ) -> str:
'''simple docstring'''
snake_case : Union[str, Any] = vocab_size
snake_case : Optional[int] = hidden_size
snake_case : List[Any] = d_kv
snake_case : Dict = d_ff
snake_case : Dict = num_layers
snake_case : Any = num_heads
snake_case : List[str] = relative_attention_num_buckets
snake_case : List[str] = relative_attention_max_distance
snake_case : int = dropout_rate
snake_case : int = layer_norm_epsilon
snake_case : Dict = initializer_factor
snake_case : Dict = use_cache
snake_case : int = eos_token_id
snake_case : Union[str, Any] = decoder_start_token_id
# for backwards compatibility
snake_case : Any = dense_act_fn
super().__init__(
pad_token_id=snake_case__ , eos_token_id=snake_case__ , decoder_start_token_id=snake_case__ , tie_word_embeddings=snake_case__ , is_decoder=snake_case__ , **snake_case__ , )
@classmethod
def _SCREAMING_SNAKE_CASE (cls : str , snake_case__ : Union[str, os.PathLike] , **snake_case__ : int ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(snake_case__ )
snake_case , snake_case : Union[str, Any] = cls.get_config_dict(snake_case__ , **snake_case__ )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get("model_type" ) == "pix2struct":
snake_case : Optional[int] = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(snake_case__ , **snake_case__ )
class UpperCAmelCase ( A_ ):
A__ : Union[str, Any] = "pix2struct_vision_model"
def __init__(self : Dict , snake_case__ : Tuple=7_68 , snake_case__ : Optional[int]=7_68 , snake_case__ : int=20_48 , snake_case__ : List[Any]=64 , snake_case__ : str=12 , snake_case__ : Tuple=12 , snake_case__ : List[Any]="gelu_new" , snake_case__ : str=1e-6 , snake_case__ : str=0.0 , snake_case__ : str=0.0 , snake_case__ : Tuple=1e-10 , snake_case__ : List[str]=1.0 , snake_case__ : Optional[int]=40_96 , snake_case__ : str=32 , snake_case__ : Dict=1_28 , **snake_case__ : Dict , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**snake_case__ )
snake_case : Optional[int] = hidden_size
snake_case : str = patch_embed_hidden_size
snake_case : List[str] = d_ff
snake_case : Optional[int] = dropout_rate
snake_case : str = num_hidden_layers
snake_case : int = num_attention_heads
snake_case : Tuple = initializer_range
snake_case : Union[str, Any] = initializer_factor
snake_case : Optional[Any] = attention_dropout
snake_case : List[str] = layer_norm_eps
snake_case : str = dense_act_fn
snake_case : Dict = seq_len
snake_case : int = relative_attention_num_buckets
snake_case : List[str] = relative_attention_max_distance
snake_case : Any = d_kv
@classmethod
def _SCREAMING_SNAKE_CASE (cls : Any , snake_case__ : Union[str, os.PathLike] , **snake_case__ : List[str] ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(snake_case__ )
snake_case , snake_case : Dict = cls.get_config_dict(snake_case__ , **snake_case__ )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get("model_type" ) == "pix2struct":
snake_case : str = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(snake_case__ , **snake_case__ )
class UpperCAmelCase ( A_ ):
A__ : str = "pix2struct"
A__ : Tuple = True
def __init__(self : Any , snake_case__ : List[Any]=None , snake_case__ : List[Any]=None , snake_case__ : str=1.0 , snake_case__ : List[Any]=0.02 , snake_case__ : Dict=False , snake_case__ : Union[str, Any]=False , snake_case__ : Any=True , **snake_case__ : Dict , ) -> Any:
'''simple docstring'''
super().__init__(tie_word_embeddings=snake_case__ , is_encoder_decoder=snake_case__ , **snake_case__ )
if text_config is None:
snake_case : Optional[int] = {}
logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values." )
if vision_config is None:
snake_case : int = {}
logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values." )
snake_case : List[str] = PixaStructTextConfig(**snake_case__ )
snake_case : Any = PixaStructVisionConfig(**snake_case__ )
snake_case : Any = self.text_config.decoder_start_token_id
snake_case : Optional[Any] = self.text_config.pad_token_id
snake_case : Dict = self.text_config.eos_token_id
snake_case : Tuple = initializer_factor
snake_case : Optional[int] = initializer_range
snake_case : int = self.initializer_range
snake_case : Dict = self.initializer_range
snake_case : Any = is_vqa
@classmethod
def _SCREAMING_SNAKE_CASE (cls : Any , snake_case__ : PixaStructTextConfig , snake_case__ : PixaStructVisionConfig , **snake_case__ : List[Any] ) -> List[Any]:
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Dict ) -> Tuple:
'''simple docstring'''
snake_case : int = copy.deepcopy(self.__dict__ )
snake_case : Dict = self.text_config.to_dict()
snake_case : Optional[Any] = self.vision_config.to_dict()
snake_case : Optional[int] = self.__class__.model_type
return output
| 10 |
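# The Pix2Struct config above nests a text and a vision sub-config and
# serializes both recursively in to_dict. A minimal generic sketch of that
# composite-config pattern with plain classes (not the transformers API):
import copy
class SubConfig:
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)
    def to_dict(self):
        return copy.deepcopy(self.__dict__)
class CompositeConfig:
    model_type = "composite"
    def __init__(self, text_config=None, vision_config=None):
        self.text_config = SubConfig(**(text_config or {}))
        self.vision_config = SubConfig(**(vision_config or {}))
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        # Replace the sub-config objects with plain dicts so the result is
        # JSON-serializable.
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.model_type
        return output
print(CompositeConfig(text_config={"hidden_size": 768}).to_dict()["text_config"])  # {'hidden_size': 768}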
from __future__ import annotations
def UpperCamelCase ( __lowerCamelCase : list[int] ):
snake_case : Optional[int] = len(__lowerCamelCase ) // 2
# choose the middle 3 elements
snake_case : str = lst[m - 1 : m + 2]
# if middle element is peak
if three[1] > three[0] and three[1] > three[2]:
return three[1]
# if increasing, recurse on right
elif three[0] < three[2]:
if len(lst[:m] ) == 2:
m -= 1
return peak(lst[m:] )
# decreasing
else:
if len(lst[:m] ) == 2:
m += 1
return peak(lst[:m] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 10 | 1 |
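# The recursive peak finder above hand-adjusts the midpoint for length-2
# slices. The same divide-and-conquer idea as a standard iterative binary
# search, shown as a minimal sketch:
def find_peak(lst: list) -> int:
    lo, hi = 0, len(lst) - 1
    while lo < hi:
        mid = (lo + hi) // 2
        if lst[mid] < lst[mid + 1]:
            lo = mid + 1  # rising slope: a peak lies to the right
        else:
            hi = mid  # falling slope: mid itself may be the peak
    return lst[lo]
print(find_peak([1, 3, 7, 9, 4, 2]))  # 9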
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
__lowerCamelCase = get_tests_dir() + """/test_data/fsmt/fsmt_val_data.json"""
with io.open(filename, """r""", encoding="""utf-8""") as f:
__lowerCamelCase = json.load(f)
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE (self : Dict , snake_case__ : Optional[int] ) -> Any:
'''simple docstring'''
return FSMTTokenizer.from_pretrained(snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] , snake_case__ : str ) -> List[str]:
'''simple docstring'''
snake_case : List[Any] = FSMTForConditionalGeneration.from_pretrained(snake_case__ ).to(snake_case__ )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
["en-ru", 26.0],
["ru-en", 22.0],
["en-de", 22.0],
["de-en", 29.0],
] )
@slow
def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : Tuple , snake_case__ : Optional[int] ) -> Any:
'''simple docstring'''
snake_case : Optional[int] = f"""facebook/wmt19-{pair}"""
snake_case : Optional[Any] = self.get_tokenizer(snake_case__ )
snake_case : Dict = self.get_model(snake_case__ )
snake_case : List[Any] = bleu_data[pair]["src"]
snake_case : int = bleu_data[pair]["tgt"]
snake_case : Union[str, Any] = tokenizer(snake_case__ , return_tensors="pt" , truncation=snake_case__ , padding="longest" ).to(snake_case__ )
snake_case : str = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
snake_case : Optional[int] = tokenizer.batch_decode(
snake_case__ , skip_special_tokens=snake_case__ , clean_up_tokenization_spaces=snake_case__ )
snake_case : Optional[int] = calculate_bleu(snake_case__ , snake_case__ )
print(snake_case__ )
self.assertGreaterEqual(scores["bleu"] , snake_case__ )
| 10 |
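# calculate_bleu above is imported from the examples' local utils module; to my
# knowledge it wraps sacrebleu's corpus BLEU. A minimal direct sketch (assumes
# `pip install sacrebleu`):
import sacrebleu
hypotheses = ["the cat sat on the mat"]
references = ["the cat sat on the mat"]
# corpus_bleu takes the hypotheses plus a list of reference streams.
bleu = sacrebleu.corpus_bleu(hypotheses, [references])
print(round(bleu.score, 2))  # 100.0 for identical sentences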
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
__lowerCamelCase = """."""
if __name__ == "__main__":
__lowerCamelCase = os.path.join(REPO_PATH, """utils/documentation_tests.txt""")
__lowerCamelCase = []
__lowerCamelCase = []
with open(doctest_file_path) as fp:
for line in fp:
__lowerCamelCase = line.strip()
__lowerCamelCase = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
__lowerCamelCase = """\n""".join(non_existent_paths)
raise ValueError(F'`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}')
if all_paths != sorted(all_paths):
raise ValueError("""Files in `utils/documentation_tests.txt` are not in alphabetical order.""")
| 10 | 1 |
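# A generic sketch of the two checks performed above (every listed path exists
# and the list is alphabetically sorted), written with pathlib so it is
# independent of this repo's layout:
from pathlib import Path
def check_file_list(list_file: str, repo_root: str = ".") -> None:
    lines = [l.strip() for l in Path(list_file).read_text().splitlines() if l.strip()]
    missing = [l for l in lines if not (Path(repo_root) / l).exists()]
    if missing:
        raise ValueError("Non-existent paths:\n" + "\n".join(missing))
    if lines != sorted(lines):
        raise ValueError("Entries are not in alphabetical order.")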
def UpperCamelCase ( __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int ):
snake_case : Dict = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
# formula for sum of series
return total
def UpperCamelCase ( ):
print(sum_of_series(1 , 1 , 10 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 10 |
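# The closed form used above is S = n/2 * (2a + (n-1)d). A quick sketch
# checking it against a brute-force sum for a = 1, d = 1, n = 10:
def arithmetic_sum(first_term: float, common_diff: float, num_of_terms: int) -> float:
    return (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
brute_force = sum(1 + k * 1 for k in range(10))  # 1 + 2 + ... + 10
assert arithmetic_sum(1, 1, 10) == brute_force == 55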
import fire
from utils import calculate_rouge, save_json
def UpperCamelCase ( __lowerCamelCase : Dict , __lowerCamelCase : Dict , __lowerCamelCase : Tuple=None , **__lowerCamelCase : Tuple ):
snake_case : Optional[Any] = [x.strip() for x in open(__lowerCamelCase ).readlines()]
snake_case : Union[str, Any] = [x.strip() for x in open(__lowerCamelCase ).readlines()][: len(__lowerCamelCase )]
snake_case : List[Any] = calculate_rouge(__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase )
if save_path is not None:
save_json(__lowerCamelCase , __lowerCamelCase , indent=__lowerCamelCase )
return metrics # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
| 10 | 1 |
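# calculate_rouge above also comes from the examples' utils module; to my
# knowledge it is built on Google's rouge-score package. A minimal direct
# sketch (assumes `pip install rouge-score`):
from rouge_score import rouge_scorer
scorer = rouge_scorer.RougeScorer(["rouge1", "rougeL"], use_stemmer=True)
scores = scorer.score("the cat sat on the mat", "the cat is on the mat")
print(round(scores["rougeL"].fmeasure, 3))  # ~0.833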
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( unittest.TestCase ):
@slow
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> Dict:
'''simple docstring'''
snake_case : Optional[int] = TFCamembertModel.from_pretrained("jplu/tf-camembert-base" )
snake_case : List[str] = tf.convert_to_tensor(
[[5, 1_21, 11, 6_60, 16, 7_30, 2_55_43, 1_10, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !"
snake_case : Dict = model(snake_case__ )["last_hidden_state"]
snake_case : int = tf.TensorShape((1, 10, 7_68) )
self.assertEqual(output.shape , snake_case__ )
# compare the actual values for a slice.
snake_case : Dict = tf.convert_to_tensor(
[[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 10 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""encoder.layer_norm_for_extract""": """layer_norm_for_extract""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""label_embs_concat""": """label_embeddings_concat""",
"""mask_emb""": """masked_spec_embed""",
"""spk_proj""": """speaker_proj""",
}
__lowerCamelCase = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""label_embeddings_concat""",
"""speaker_proj""",
"""layer_norm_for_extract""",
]
def UpperCamelCase ( __lowerCamelCase : Optional[int] , __lowerCamelCase : Any , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any ):
for attribute in key.split("." ):
snake_case : Tuple = getattr(__lowerCamelCase , __lowerCamelCase )
if weight_type is not None:
snake_case : int = getattr(__lowerCamelCase , __lowerCamelCase ).shape
else:
snake_case : Dict = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
snake_case : Dict = value
elif weight_type == "weight_g":
snake_case : Optional[int] = value
elif weight_type == "weight_v":
snake_case : Optional[int] = value
elif weight_type == "bias":
snake_case : Tuple = value
else:
snake_case : Optional[int] = value
logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def UpperCamelCase ( __lowerCamelCase : int , __lowerCamelCase : List[str] ):
snake_case : int = []
snake_case : List[Any] = fairseq_model.state_dict()
snake_case : int = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
snake_case : List[str] = False
if "conv_layers" in name:
load_conv_layer(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , hf_model.config.feat_extract_norm == "group" , )
snake_case : str = True
else:
for key, mapped_key in MAPPING.items():
snake_case : Tuple = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split("." )[:-1] ) != key):
# special case since naming is very similar
continue
snake_case : Tuple = True
if "*" in mapped_key:
snake_case : Union[str, Any] = name.split(__lowerCamelCase )[0].split("." )[-2]
snake_case : Any = mapped_key.replace("*" , __lowerCamelCase )
if "weight_g" in name:
snake_case : Optional[int] = "weight_g"
elif "weight_v" in name:
snake_case : Tuple = "weight_v"
elif "bias" in name:
snake_case : Dict = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
snake_case : str = "weight"
else:
snake_case : str = None
set_recursively(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
continue
if not is_used:
unused_weights.append(__lowerCamelCase )
logger.warning(f"""Unused weights: {unused_weights}""" )
def UpperCamelCase ( __lowerCamelCase : Any , __lowerCamelCase : Any , __lowerCamelCase : Tuple , __lowerCamelCase : List[str] , __lowerCamelCase : Any ):
snake_case : str = full_name.split("conv_layers." )[-1]
snake_case : int = name.split("." )
snake_case : Optional[int] = int(items[0] )
snake_case : Dict = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
snake_case : Union[str, Any] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
snake_case : List[str] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" )
snake_case : Dict = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" )
snake_case : Optional[Any] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__lowerCamelCase )
@torch.no_grad()
def UpperCamelCase ( __lowerCamelCase : Tuple , __lowerCamelCase : Dict , __lowerCamelCase : List[Any]=None , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Dict=True ):
if config_path is not None:
snake_case : str = UniSpeechSatConfig.from_pretrained(__lowerCamelCase )
else:
snake_case : str = UniSpeechSatConfig()
snake_case : Tuple = ""
if is_finetuned:
snake_case : Tuple = UniSpeechSatForCTC(__lowerCamelCase )
else:
snake_case : List[Any] = UniSpeechSatForPreTraining(__lowerCamelCase )
snake_case , snake_case , snake_case : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
snake_case : Dict = model[0].eval()
recursively_load_weights(__lowerCamelCase , __lowerCamelCase )
hf_wavavec.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
__lowerCamelCase = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 10 | 1 |
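# The conversion script above resolves dotted checkpoint keys such as
# "encoder.layers.0.attention.k_proj" by walking one attribute per segment.
# A minimal generic sketch of that idiom on a dummy object (not the script's
# own helper):
from types import SimpleNamespace
def get_by_dotted_key(root, dotted_key: str):
    obj = root
    for attribute in dotted_key.split("."):
        # Digit segments index into lists (e.g. "layers.0"); everything else
        # is a plain attribute lookup.
        obj = obj[int(attribute)] if attribute.isdigit() else getattr(obj, attribute)
    return obj
model = SimpleNamespace(encoder=SimpleNamespace(layers=[SimpleNamespace(weight=1.0)]))
print(get_by_dotted_key(model, "encoder.layers.0.weight"))  # 1.0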
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class UpperCAmelCase ( A_ ):
def __init__(self : Tuple , snake_case__ : Any , snake_case__ : str=None , snake_case__ : Tuple=True , snake_case__ : Union[str, Any]=None , **snake_case__ : Union[str, Any] ) -> Dict:
'''simple docstring'''
snake_case : Optional[int] = parent
snake_case : Union[str, Any] = config_class
snake_case : Dict = has_text_modality
snake_case : int = kwargs
snake_case : Any = common_properties
def _SCREAMING_SNAKE_CASE (self : int ) -> Dict:
'''simple docstring'''
snake_case : str = self.config_class(**self.inputs_dict )
snake_case : int = (
["hidden_size", "num_attention_heads", "num_hidden_layers"]
if self.common_properties is None
else self.common_properties
)
# Add common fields for text models
if self.has_text_modality:
common_properties.extend(["vocab_size"] )
# Test that config has the common properties as getters
for prop in common_properties:
self.parent.assertTrue(hasattr(snake_case__ , snake_case__ ) , msg=f"""`{prop}` does not exist""" )
        # Test that config has the common properties as setters
for idx, name in enumerate(snake_case__ ):
try:
setattr(snake_case__ , snake_case__ , snake_case__ )
self.parent.assertEqual(
getattr(snake_case__ , snake_case__ ) , snake_case__ , msg=f"""`{name} value {idx} expected, but was {getattr(snake_case__ , snake_case__ )}""" )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
# Test if config class can be called with Config(prop_name=..)
for idx, name in enumerate(snake_case__ ):
try:
snake_case : Dict = self.config_class(**{name: idx} )
self.parent.assertEqual(
getattr(snake_case__ , snake_case__ ) , snake_case__ , msg=f"""`{name} value {idx} expected, but was {getattr(snake_case__ , snake_case__ )}""" )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
def _SCREAMING_SNAKE_CASE (self : str ) -> Optional[int]:
'''simple docstring'''
snake_case : int = self.config_class(**self.inputs_dict )
snake_case : Optional[Any] = json.loads(config.to_json_string() )
for key, value in self.inputs_dict.items():
self.parent.assertEqual(obj[key] , snake_case__ )
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> Dict:
'''simple docstring'''
snake_case : Dict = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case : str = os.path.join(snake_case__ , "config.json" )
config_first.to_json_file(snake_case__ )
snake_case : Any = self.config_class.from_json_file(snake_case__ )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def _SCREAMING_SNAKE_CASE (self : str ) -> List[str]:
'''simple docstring'''
snake_case : int = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
config_first.save_pretrained(snake_case__ )
snake_case : Tuple = self.config_class.from_pretrained(snake_case__ )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Tuple = self.config_class(**self.inputs_dict )
snake_case : List[str] = "test"
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case : Tuple = os.path.join(snake_case__ , snake_case__ )
config_first.save_pretrained(snake_case__ )
snake_case : Any = self.config_class.from_pretrained(snake_case__ , subfolder=snake_case__ )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> int:
'''simple docstring'''
snake_case : int = self.config_class(**self.inputs_dict , num_labels=5 )
self.parent.assertEqual(len(config.idalabel ) , 5 )
self.parent.assertEqual(len(config.labelaid ) , 5 )
snake_case : Union[str, Any] = 3
self.parent.assertEqual(len(config.idalabel ) , 3 )
self.parent.assertEqual(len(config.labelaid ) , 3 )
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> List[Any]:
'''simple docstring'''
if self.config_class.is_composition:
return
snake_case : Dict = self.config_class()
self.parent.assertIsNotNone(snake_case__ )
def _SCREAMING_SNAKE_CASE (self : int ) -> Optional[int]:
'''simple docstring'''
snake_case : Optional[Any] = copy.deepcopy(snake_case__ )
snake_case : List[str] = self.config_class(**snake_case__ )
snake_case : Dict = []
for key, value in config_common_kwargs.items():
if key == "torch_dtype":
if not is_torch_available():
continue
else:
import torch
if config.torch_dtype != torch.floataa:
wrong_values.append(("torch_dtype", config.torch_dtype, torch.floataa) )
elif getattr(snake_case__ , snake_case__ ) != value:
wrong_values.append((key, getattr(snake_case__ , snake_case__ ), value) )
if len(snake_case__ ) > 0:
snake_case : Tuple = "\n".join([f"""- {v[0]}: got {v[1]} instead of {v[2]}""" for v in wrong_values] )
raise ValueError(f"""The following keys were not properly set in the config:\n{errors}""" )
def _SCREAMING_SNAKE_CASE (self : int ) -> List[Any]:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
| 10 |
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = """▁"""
__lowerCamelCase = {"""vocab_file""": """prophetnet.tokenizer"""}
__lowerCamelCase = {
"""vocab_file""": {
"""microsoft/xprophetnet-large-wiki100-cased""": (
"""https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"""
),
}
}
__lowerCamelCase = {
"""microsoft/xprophetnet-large-wiki100-cased""": {"""do_lower_case""": False},
}
__lowerCamelCase = {
"""microsoft/xprophetnet-large-wiki100-cased""": 5_12,
}
def UpperCamelCase ( __lowerCamelCase : Dict ):
snake_case : Dict = collections.OrderedDict()
with open(__lowerCamelCase , "r" , encoding="utf-8" ) as reader:
snake_case : Any = reader.readlines()
for index, token in enumerate(__lowerCamelCase ):
snake_case : List[Any] = token.rstrip("\n" )
snake_case : int = index
return vocab
class UpperCAmelCase ( A_ ):
A__ : Tuple = VOCAB_FILES_NAMES
A__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
A__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : int = ["input_ids", "attention_mask"]
def __init__(self : Any , snake_case__ : Dict , snake_case__ : List[Any]="[SEP]" , snake_case__ : Optional[int]="[SEP]" , snake_case__ : Union[str, Any]="[SEP]" , snake_case__ : List[Any]="[UNK]" , snake_case__ : List[str]="[PAD]" , snake_case__ : List[str]="[CLS]" , snake_case__ : List[Any]="[MASK]" , snake_case__ : Optional[Dict[str, Any]] = None , **snake_case__ : List[str] , ) -> None:
'''simple docstring'''
snake_case : Dict = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , cls_token=snake_case__ , mask_token=snake_case__ , sp_model_kwargs=self.sp_model_kwargs , **snake_case__ , )
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece" )
raise
snake_case : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(snake_case__ ) )
snake_case : Dict = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
snake_case : List[Any] = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
for i in range(10 ):
snake_case : Dict = f"""[unused{i}]"""
snake_case : List[str] = 5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
snake_case : Dict = 12
snake_case : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(snake_case__ )
def __getstate__(self : str ) -> Union[str, Any]:
'''simple docstring'''
snake_case : str = self.__dict__.copy()
snake_case : Tuple = None
return state
def __setstate__(self : str , snake_case__ : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
snake_case : Union[str, Any] = d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece" )
raise
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
snake_case : Dict = {}
snake_case : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
if token_ids_a is None:
return ([0] * len(snake_case__ )) + [1]
return ([0] * len(snake_case__ )) + [1] + ([0] * len(snake_case__ )) + [1]
def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
snake_case : List[str] = [self.sep_token_id]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0]
return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _SCREAMING_SNAKE_CASE (self : Any ) -> int:
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset
def _SCREAMING_SNAKE_CASE (self : int ) -> Any:
'''simple docstring'''
snake_case : List[str] = {self.convert_ids_to_tokens(snake_case__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : str ) -> str:
'''simple docstring'''
return self.sp_model.encode(snake_case__ , out_type=snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : Optional[int] ) -> Any:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
snake_case : Optional[Any] = self.sp_model.PieceToId(snake_case__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : Optional[int] ) -> int:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] , snake_case__ : Dict ) -> List[Any]:
'''simple docstring'''
snake_case : Dict = "".join(snake_case__ ).replace(snake_case__ , " " ).strip()
return out_string
def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : str , snake_case__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(snake_case__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case : Dict = os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case__ )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case__ , "wb" ) as fi:
snake_case : Tuple = self.sp_model.serialized_model_proto()
fi.write(snake_case__ )
return (out_vocab_file,)
def _SCREAMING_SNAKE_CASE (self : Any , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.sep_token_id]
snake_case : str = [self.sep_token_id]
return token_ids_a + sep + token_ids_a + sep
| 10 | 1 |
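# The tokenizer above aligns the SentencePiece vocab with the fairseq vocab by
# a constant offset plus a small table of special tokens. A minimal sketch of
# the two lookups with toy ids (not the real vocabularies):
FAIRSEQ_TOKENS_TO_IDS = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
FAIRSEQ_IDS_TO_TOKENS = {v: k for k, v in FAIRSEQ_TOKENS_TO_IDS.items()}
FAIRSEQ_OFFSET = 1  # fairseq id = spm id + offset for ordinary pieces
def token_to_id(token: str, spm_piece_to_id) -> int:
    if token in FAIRSEQ_TOKENS_TO_IDS:
        return FAIRSEQ_TOKENS_TO_IDS[token]
    spm_id = spm_piece_to_id(token)
    # SentencePiece returns 0 for unknown pieces, so fall back to <unk>.
    return spm_id + FAIRSEQ_OFFSET if spm_id else FAIRSEQ_TOKENS_TO_IDS["<unk>"]
def id_to_token(index: int, spm_id_to_piece) -> str:
    if index in FAIRSEQ_IDS_TO_TOKENS:
        return FAIRSEQ_IDS_TO_TOKENS[index]
    return spm_id_to_piece(index - FAIRSEQ_OFFSET)
print(token_to_id("▁de", {"▁de": 8}.get))  # 9
print(id_to_token(9, {8: "▁de"}.get))  # ▁de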
def UpperCamelCase ( ):
snake_case : Optional[int] = 0
for i in range(1 , 1001 ):
total += i**i
    return str(total )[-10:]
if __name__ == "__main__":
print(solution())
| 350 |
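# Only the last ten digits of the sum are needed, so each term can be reduced
# modulo 10**10 with three-argument pow instead of building thousand-digit
# integers:
MOD = 10**10
last_ten = sum(pow(i, i, MOD) for i in range(1, 1001)) % MOD
print(str(last_ten).zfill(10))  # 9110846700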
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = """▁"""
__lowerCamelCase = {"""vocab_file""": """sentencepiece.bpe.model"""}
__lowerCamelCase = {
"""vocab_file""": {
"""facebook/xglm-564M""": """https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model""",
}
}
__lowerCamelCase = {
"""facebook/xglm-564M""": 20_48,
}
class UpperCAmelCase ( A_ ):
A__ : Any = VOCAB_FILES_NAMES
A__ : Tuple = PRETRAINED_VOCAB_FILES_MAP
A__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : Optional[Any] = ["input_ids", "attention_mask"]
def __init__(self : str , snake_case__ : Optional[Any] , snake_case__ : List[str]="<s>" , snake_case__ : Tuple="</s>" , snake_case__ : Dict="</s>" , snake_case__ : Any="<s>" , snake_case__ : str="<unk>" , snake_case__ : str="<pad>" , snake_case__ : Optional[Dict[str, Any]] = None , **snake_case__ : Any , ) -> None:
'''simple docstring'''
snake_case : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
snake_case : Optional[int] = 7
snake_case : List[str] = [f"""<madeupword{i}>""" for i in range(self.num_madeup_words )]
snake_case : Union[str, Any] = kwargs.get("additional_special_tokens" , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , pad_token=snake_case__ , sp_model_kwargs=self.sp_model_kwargs , **snake_case__ , )
snake_case : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(snake_case__ ) )
snake_case : str = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
snake_case : int = 1
        # Mimic fairseq token-to-id alignment for the first 4 tokens
snake_case : Any = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
snake_case : Tuple = len(self.sp_model )
snake_case : Any = {f"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(snake_case__ )
snake_case : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__(self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
snake_case : Union[str, Any] = self.__dict__.copy()
snake_case : str = None
snake_case : Union[str, Any] = self.sp_model.serialized_model_proto()
return state
def __setstate__(self : Dict , snake_case__ : Optional[Any] ) -> List[str]:
'''simple docstring'''
snake_case : int = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
snake_case : List[str] = {}
snake_case : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
snake_case : Tuple = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
if token_ids_a is None:
return [1] + ([0] * len(snake_case__ ))
return [1] + ([0] * len(snake_case__ )) + [1, 1] + ([0] * len(snake_case__ ))
def _SCREAMING_SNAKE_CASE (self : List[Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
snake_case : List[str] = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> List[Any]:
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def _SCREAMING_SNAKE_CASE (self : int ) -> Tuple:
'''simple docstring'''
snake_case : List[str] = {self.convert_ids_to_tokens(snake_case__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _SCREAMING_SNAKE_CASE (self : List[str] , snake_case__ : str ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(snake_case__ , out_type=snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
snake_case : List[Any] = self.sp_model.PieceToId(snake_case__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _SCREAMING_SNAKE_CASE (self : Optional[Any] , snake_case__ : str ) -> int:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : Tuple ) -> int:
'''simple docstring'''
snake_case : List[Any] = "".join(snake_case__ ).replace(snake_case__ , " " ).strip()
return out_string
def _SCREAMING_SNAKE_CASE (self : List[str] , snake_case__ : str , snake_case__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(snake_case__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case : Optional[Any] = os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case__ )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case__ , "wb" ) as fi:
snake_case : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(snake_case__ )
return (out_vocab_file,)
| 10 | 0 |
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCAmelCase ( A__ ,unittest.TestCase ):
A__ : List[str] = RoCBertTokenizer
A__ : Optional[Any] = None
A__ : Optional[int] = False
A__ : int = True
A__ : Tuple = filter_non_english
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> Tuple:
'''simple docstring'''
super().setUp()
snake_case : Union[str, Any] = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """你""", """好""", """是""", """谁""", """a""", """b""", """c""", """d"""]
snake_case : Dict = {}
snake_case : Union[str, Any] = {}
for i, value in enumerate(__A ):
snake_case : List[Any] = i
snake_case : List[str] = i
snake_case : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
snake_case : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_shape_file"] )
snake_case : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_pronunciation_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
with open(self.word_shape_file , "w" , encoding="utf-8" ) as word_shape_writer:
json.dump(__A , __A , ensure_ascii=__A )
with open(self.word_pronunciation_file , "w" , encoding="utf-8" ) as word_pronunciation_writer:
json.dump(__A , __A , ensure_ascii=__A )
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Dict:
'''simple docstring'''
snake_case : Optional[Any] = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
snake_case : Dict = tokenizer.tokenize("你好[SEP]你是谁" )
self.assertListEqual(__A , ["你", "好", "[SEP]", "你", "是", "谁"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(__A ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(__A ) , [5, 6, 2, 5, 7, 8] )
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> int:
'''simple docstring'''
snake_case : List[Any] = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Dict:
'''simple docstring'''
snake_case : str = RoCBertBasicTokenizer(do_lower_case=__A )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def _SCREAMING_SNAKE_CASE (self : int ) -> str:
'''simple docstring'''
snake_case : Union[str, Any] = RoCBertBasicTokenizer(do_lower_case=__A , strip_accents=__A )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def _SCREAMING_SNAKE_CASE (self : Any ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Dict = RoCBertBasicTokenizer(do_lower_case=__A , strip_accents=__A )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> int:
'''simple docstring'''
snake_case : str = RoCBertBasicTokenizer(do_lower_case=__A )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def _SCREAMING_SNAKE_CASE (self : Any ) -> Union[str, Any]:
'''simple docstring'''
snake_case : int = RoCBertBasicTokenizer(do_lower_case=__A )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> List[Any]:
'''simple docstring'''
snake_case : Union[str, Any] = RoCBertBasicTokenizer(do_lower_case=__A , strip_accents=__A )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Any:
'''simple docstring'''
snake_case : Any = RoCBertBasicTokenizer(do_lower_case=__A , strip_accents=__A )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Tuple:
'''simple docstring'''
snake_case : int = RoCBertBasicTokenizer(do_lower_case=__A , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> str:
'''simple docstring'''
snake_case : List[Any] = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
snake_case : Any = {}
for i, token in enumerate(__A ):
snake_case : int = i
snake_case : Union[str, Any] = RoCBertWordpieceTokenizer(vocab=__A , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
def _SCREAMING_SNAKE_CASE (self : int ) -> Tuple:
'''simple docstring'''
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def _SCREAMING_SNAKE_CASE (self : int ) -> Optional[Any]:
'''simple docstring'''
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def _SCREAMING_SNAKE_CASE (self : str ) -> Tuple:
'''simple docstring'''
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> List[str]:
'''simple docstring'''
snake_case : List[str] = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(__A ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
if self.test_rust_tokenizer:
snake_case : List[str] = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(__A ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> int:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
snake_case : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(__A , **__A )
snake_case : Union[str, Any] = f"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
snake_case : List[Any] = tokenizer_r.encode_plus(
__A , return_attention_mask=__A , return_token_type_ids=__A , return_offsets_mapping=__A , add_special_tokens=__A , )
snake_case : List[str] = tokenizer_r.do_lower_case if hasattr(__A , "do_lower_case" ) else False
snake_case : List[str] = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """Allen"""),
((21, 23), """##NL"""),
((23, 24), """##P"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """allen"""),
((21, 23), """##nl"""),
((23, 24), """##p"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
def _SCREAMING_SNAKE_CASE (self : int ) -> List[Any]:
'''simple docstring'''
snake_case : Tuple = ["""的""", """人""", """有"""]
snake_case : Optional[Any] = """""".join(__A )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
snake_case : str = True
snake_case : Tuple = self.tokenizer_class.from_pretrained(__A , **__A )
snake_case : str = self.rust_tokenizer_class.from_pretrained(__A , **__A )
snake_case : Optional[Any] = tokenizer_p.encode(__A , add_special_tokens=__A )
snake_case : str = tokenizer_r.encode(__A , add_special_tokens=__A )
snake_case : List[Any] = tokenizer_r.convert_ids_to_tokens(__A )
snake_case : List[Any] = tokenizer_p.convert_ids_to_tokens(__A )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(__A , __A )
self.assertListEqual(__A , __A )
snake_case : Union[str, Any] = False
snake_case : Tuple = self.rust_tokenizer_class.from_pretrained(__A , **__A )
snake_case : str = self.tokenizer_class.from_pretrained(__A , **__A )
snake_case : Dict = tokenizer_r.encode(__A , add_special_tokens=__A )
snake_case : Tuple = tokenizer_p.encode(__A , add_special_tokens=__A )
snake_case : Dict = tokenizer_r.convert_ids_to_tokens(__A )
snake_case : Optional[Any] = tokenizer_p.convert_ids_to_tokens(__A )
# it is expected that only the first Chinese character is not preceded by "##".
snake_case : int = [
f"""##{token}""" if idx != 0 else token for idx, token in enumerate(__A )
]
self.assertListEqual(__A , __A )
self.assertListEqual(__A , __A )
@slow
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> Any:
'''simple docstring'''
snake_case : List[Any] = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
snake_case : Tuple = tokenizer.encode("你好" , add_special_tokens=__A )
snake_case : Union[str, Any] = tokenizer.encode("你是谁" , add_special_tokens=__A )
snake_case : Dict = tokenizer.build_inputs_with_special_tokens(__A )
snake_case : List[str] = tokenizer.build_inputs_with_special_tokens(__A , __A )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
    def test_prepare_for_model(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                string_sequence = "你好,你是谁"
                tokens = tokenizer.tokenize(string_sequence)
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids, tokens_shape_ids, tokens_proun_ids, add_special_tokens=True
                )
                input_dict = tokenizer.encode_plus(string_sequence, add_special_tokens=True)
                self.assertEqual(input_dict, prepared_input_dict)
| 351 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size
    def rescale(
        self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(
        self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None
    ) -> np.ndarray:
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)
    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
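# Illustrative sketch (not part of the original class): `pad` above always grows each
# spatial side to the *next* multiple of `size` using symmetric padding, even when the
# side is already a multiple. The demo below reproduces the arithmetic with plain NumPy.
if __name__ == "__main__":
    demo = np.zeros((17, 21))
    size = 8
    pad_height = (17 // size + 1) * size - 17  # 7, so 17 -> 24
    pad_width = (21 // size + 1) * size - 21  # 3, so 21 -> 24
    print(np.pad(demo, ((0, pad_height), (0, pad_width)), mode="symmetric").shape)  # (24, 24)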
| 10 | 0 |
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
logger = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
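# Minimal usage sketch (kept as a comment so the module stays import-safe; the values
# below are illustrative, not defaults):
#
#     args = Seq2SeqTrainingArguments(
#         output_dir="out",
#         predict_with_generate=True,   # run generate() in eval loops so ROUGE/BLEU can be computed
#         generation_max_length=128,    # overrides model.config.max_length during evaluation
#         generation_num_beams=4,       # overrides model.config.num_beams during evaluation
#     )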
| 352 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int
) -> np.ndarray:
    # prepare kernel
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float64)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py
            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor
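# Worked example (hand-checked): with ksize=3, sigma=8, theta=0, lambd=10, gamma=0 and
# psi=0, the _y term vanishes, so every row of the kernel is identical and equals
# exp(-px**2 / 128) * cos(2 * np.pi * px / 10) for px in (-1, 0, 1).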
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
__lowerCamelCase = imread("""../image_data/lena.jpg""")
# turn image in gray scale value
__lowerCamelCase = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
__lowerCamelCase = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 1_20, 1_50]:
__lowerCamelCase = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
__lowerCamelCase = out / out.max() * 2_55
__lowerCamelCase = out.astype(np.uinta)
imshow("""Original""", gray)
imshow("""Gabor filter with 20x20 mask and 6 directions""", out)
waitKey(0)
| 10 | 0 |
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)
        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]
        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask: Optional[np.ndarray] = None) -> List[np.ndarray]:
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)

        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)

        return audio_values
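# Standalone sketch of the trimming rule in `_decode_audio` (variable names are ours):
# the mask is extended with the *non*-padding value, so samples generated past the
# prompt are kept rather than stripped.
if __name__ == "__main__":
    padding_value = 0.0
    audio = np.arange(6, dtype=np.float32).reshape(1, 1, 6)  # (bsz, channels, seq_len)
    mask = np.array([[1, 1, 1, 0]], dtype=np.float32)  # last prompt sample was padding
    difference = audio.shape[-1] - mask.shape[-1]
    mask = np.pad(mask, ((0, 0), (0, difference)), "constant", constant_values=1 - padding_value)
    kept = np.asarray(audio[0])[mask[0][None, :] != padding_value]
    print(kept.reshape(1, -1))  # [[0. 1. 2. 4. 5.]] - only the padded sample (value 3) is dropped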
| 353 |
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.decoder_layers = decoder_layers
        self.num_hidden_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings
        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(
            vocab_size=self.vocab_size,
            d_model=self.d_model,
            decoder_layers=self.decoder_layers,
            decoder_ffn_dim=self.decoder_ffn_dim,
            decoder_attention_heads=self.decoder_attention_heads,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            use_cache=self.use_cache,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
            max_position_embeddings=self.max_position_embeddings,
        )

        return (config, input_ids, attention_mask, lm_labels)
    def create_and_check_decoder_model_past(self, config, input_ids, attention_mask, lm_labels):
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]

        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs["past_key_values"]

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    # not implemented currently
    def test_inputs_embeds(self):
        pass

    def test_save_load_fast_init_from_base(self):
        pass

    def test_save_load_fast_init_to_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    # decoder cannot keep gradients
    def test_retain_grad_hidden_states_attentions(self):
        return

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
| 10 | 0 |
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
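# Note: the two-pointer scan above assumes `nums` is sorted in non-decreasing order and
# runs in O(n) time with O(1) extra space. For unsorted input, a hash map of
# value -> index is the usual O(n)-time alternative (sketch):
#
#     seen: dict[int, int] = {}
#     for idx, value in enumerate(nums):
#         if target - value in seen:
#             return [seen[target - value], idx]
#         seen[value] = idx
#     return []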
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'{two_pointer([2, 7, 11, 15], 9) = }')
| 354 |
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
__lowerCamelCase = ["""text""", """image""", """audio"""]
def UpperCamelCase ( __lowerCamelCase : List[str] ):
snake_case : str = []
for input_type in input_types:
if input_type == "text":
inputs.append("Text input" )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png" ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3000 ) )
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
inputs.append(create_inputs(__lowerCamelCase ) )
else:
raise ValueError(f"""Invalid type requested: {input_type}""" )
return inputs
def UpperCamelCase ( __lowerCamelCase : List ):
snake_case : List[str] = []
for output in outputs:
if isinstance(__lowerCamelCase , (str, AgentText) ):
output_types.append("text" )
elif isinstance(__lowerCamelCase , (Image.Image, AgentImage) ):
output_types.append("image" )
elif isinstance(__lowerCamelCase , (torch.Tensor, AgentAudio) ):
output_types.append("audio" )
else:
raise ValueError(f"""Invalid output: {output}""" )
return output_types
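# Example (hand-checked against the branches above): for outputs consisting of a string,
# a PIL image and a torch tensor, output_types(...) returns ["text", "image", "audio"],
# mirroring what create_inputs builds, so the tests below can round-trip a tool's
# declared input/output types.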
@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_types_inputs(self):
        inputs = create_inputs(self.tool.inputs)

        _inputs = []
        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*_inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
| 10 | 0 |
from typing import Optional
from torch import nn
from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
    ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(
        self,
        hidden_states,
        encoder_hidden_states,
        timestep=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
| 355 |
def hamming_distance(string1: str, string2: str) -> int:
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")

    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1

    return count
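# Hand-checked examples:
#   hamming_distance("python", "python")   -> 0
#   hamming_distance("karolin", "kathrin") -> 3  (positions r/t, o/h, l/r differ)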
if __name__ == "__main__":
import doctest
doctest.testmod()
| 10 | 0 |
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
"""wmt19-ru-en""": {"""length_penalty""": 1.1},
"""wmt19-en-ru""": {"""length_penalty""": 1.15},
"""wmt19-en-de""": {"""length_penalty""": 1.0},
"""wmt19-de-en""": {"""length_penalty""": 1.1},
# allenai:
"""wmt16-en-de-dist-12-1""": {"""length_penalty""": 0.6},
"""wmt16-en-de-dist-6-1""": {"""length_penalty""": 0.6},
"""wmt16-en-de-12-1""": {"""length_penalty""": 0.8},
"""wmt19-de-en-6-6-base""": {"""length_penalty""": 0.6},
"""wmt19-de-en-6-6-big""": {"""length_penalty""": 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = "facebook"
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
__lowerCamelCase = """allenai"""
def rewrite_dict_keys(d):
    # (1) remove the "@@" word-continuation marker, (2) add a "</w>" word-ending marker otherwise
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
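# Toy example of the rewrite (hand-checked): with d = {"le@@": 5, "er": 7, "<s>": 0},
# the "@@" continuation marker is stripped ("le@@" -> "le"), word-final tokens gain
# "</w>" ("er" -> "er</w>"), and the special token "<s>" is restored to its plain form.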
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = basename(fsmt_checkpoint_path)
    fsmt_folder_path = dirname(fsmt_checkpoint_path)

    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {"bpe": "fastbpe", "tokenizer": "moses"}
    data_dir = "."
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(f"using checkpoint {checkpoint_file}")
    chkpt = hub_utils.from_pretrained(
        fsmt_folder_path, checkpoint_file, data_dir, archive_map=models, **kwargs
    )

    args = vars(chkpt["args"]["model"])

    src_lang = args["source_lang"]
    tgt_lang = args["target_lang"]

    data_root = dirname(pytorch_dump_folder_path)
    model_dir = basename(pytorch_dump_folder_path)
    # dicts
    src_dict_file = os.path.join(fsmt_folder_path, f"dict.{src_lang}.txt")
    tgt_dict_file = os.path.join(fsmt_folder_path, f"dict.{tgt_lang}.txt")

    src_dict = Dictionary.load(src_dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-src.json")
    print(f"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # detect whether this is a do_lower_case situation, which can be derived by checking whether we
    # have at least one uppercase letter in the source vocab
    do_lower_case = True
    for k in src_vocab.keys():
        if not k.islower():
            do_lower_case = False
            break

    tgt_dict = Dictionary.load(tgt_dict_file)
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices)
    tgt_vocab_size = len(tgt_vocab)
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-tgt.json")
    print(f"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records")
    with open(tgt_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=json_indent))
    # merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    for fn in ["bpecodes", "code"]:  # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path, fn)
        if os.path.exists(fsmt_merges_file):
            break
    with open(fsmt_merges_file, encoding="utf-8") as fin:
        merges = fin.read()
    merges = re.sub(r" \d+$", "", merges, 0, re.M)  # remove frequency number
    print(f"Generating {merges_file}")
    with open(merges_file, "w", encoding="utf-8") as fout:
        fout.write(merges)
    # model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args["bpe"] == "fastbpe", f"need to extend tokenizer to support bpe={args['bpe']}"
    assert args["tokenizer"] == "moses", f"need to extend tokenizer to support bpe={args['tokenizer']}"

    model_conf = {
"architectures": ["FSMTForConditionalGeneration"],
"model_type": "fsmt",
"activation_dropout": args["activation_dropout"],
"activation_function": "relu",
"attention_dropout": args["attention_dropout"],
"d_model": args["decoder_embed_dim"],
"dropout": args["dropout"],
"init_std": 0.02,
"max_position_embeddings": args["max_source_positions"],
"num_hidden_layers": args["encoder_layers"],
"src_vocab_size": src_vocab_size,
"tgt_vocab_size": tgt_vocab_size,
"langs": [src_lang, tgt_lang],
"encoder_attention_heads": args["encoder_attention_heads"],
"encoder_ffn_dim": args["encoder_ffn_embed_dim"],
"encoder_layerdrop": args["encoder_layerdrop"],
"encoder_layers": args["encoder_layers"],
"decoder_attention_heads": args["decoder_attention_heads"],
"decoder_ffn_dim": args["decoder_ffn_embed_dim"],
"decoder_layerdrop": args["decoder_layerdrop"],
"decoder_layers": args["decoder_layers"],
"bos_token_id": 0,
"pad_token_id": 1,
"eos_token_id": 2,
"is_encoder_decoder": True,
"scale_embedding": not args["no_scale_embedding"],
"tie_word_embeddings": args["share_all_embeddings"],
}
    # good hparam defaults to start with
    model_conf["num_beams"] = 5
    model_conf["early_stopping"] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        model_conf["length_penalty"] = best_score_hparams[model_dir]["length_penalty"]
    else:
        model_conf["length_penalty"] = 1.0

    print(f"Generating {fsmt_model_config_file}")
    with open(fsmt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))
    # tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "langs": [src_lang, tgt_lang],
        "model_max_length": 1024,
        "do_lower_case": do_lower_case,
    }

    print(f"Generating {fsmt_tokenizer_config_file}")
    with open(fsmt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))
    # model
    model = chkpt["models"][0]
    model_state_dict = model.state_dict()

    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(("model." + k, v) for k, v in model_state_dict.items())

    # remove unneeded keys
    ignore_keys = [
        "model.model",
        "model.encoder.version",
        "model.decoder.version",
        "model.encoder_embed_tokens.weight",
        "model.decoder_embed_tokens.weight",
        "model.encoder.embed_positions._float_tensor",
        "model.decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = FSMTForConditionalGeneration(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict, strict=False)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
    print("\nLast step is to upload the files to s3")
    print(f"cd {data_root}")
    print(f"transformers-cli upload {model_dir}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--fsmt_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"""
""" bpecodes, etc."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 356 |
def remove_digit(num: int) -> int:
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    else:
        num_str = str(abs(num))
        num_transpositions = [list(num_str) for char in range(len(num_str))]
        for index in range(len(num_str)):
            num_transpositions[index].pop(index)
        return max(
            int("".join(list(transposition))) for transposition in num_transpositions
        )
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 10 | 0 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, txt, txt_tok):
        embs = self.transformer(input_ids=txt, attention_mask=txt_tok)[0]
        embs2 = (embs * txt_tok.unsqueeze(2)).sum(dim=1) / txt_tok.sum(dim=1)[:, None]
        return self.LinearTransformation(embs2), embs
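# A NumPy sketch (names ours) of the mask-weighted mean pooling used in forward: padding
# positions contribute nothing to the sum and are excluded from the divisor.
if __name__ == "__main__":
    import numpy as np

    embs = np.ones((1, 4, 2))  # (batch, seq_len, hidden)
    mask = np.array([[1.0, 1.0, 0.0, 0.0]])  # two real tokens, two padding tokens
    pooled = (embs * mask[:, :, None]).sum(axis=1) / mask.sum(axis=1)[:, None]
    print(pooled)  # [[1. 1.]] - padding positions do not dilute the mean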
| 357 |
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F'Current {symbol:<4} stock price is {stock_price(symbol):>8}')
| 10 | 0 |
import argparse
import torch
from transformers import (
    SpeechT5Config,
    SpeechT5FeatureExtractor,
    SpeechT5ForSpeechToSpeech,
    SpeechT5ForSpeechToText,
    SpeechT5ForTextToSpeech,
    SpeechT5Processor,
    SpeechT5Tokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
MAPPING_SPEECH_ENCODER_PRENET = {
'speech_encoder_prenet.layer_norm': 'speecht5.encoder.prenet.feature_projection.layer_norm',
'speech_encoder_prenet.post_extract_proj': 'speecht5.encoder.prenet.feature_projection.projection',
'speech_encoder_prenet.pos_conv.0': 'speecht5.encoder.prenet.pos_conv_embed.conv',
'speech_encoder_prenet.mask_emb': 'speecht5.encoder.prenet.masked_spec_embed',
}
MAPPING_TEXT_ENCODER_PRENET = {
'text_encoder_prenet.encoder_prenet.0': 'speecht5.encoder.prenet.embed_tokens',
'text_encoder_prenet.encoder_prenet.1.alpha': 'speecht5.encoder.prenet.encode_positions.alpha',
}
MAPPING_SPEECH_DECODER_PRENET = {
'speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0': 'speecht5.decoder.prenet.layers.0',
'speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0': 'speecht5.decoder.prenet.layers.1',
'speech_decoder_prenet.decoder_prenet.0.1': 'speecht5.decoder.prenet.final_layer',
'speech_decoder_prenet.decoder_prenet.1.alpha': 'speecht5.decoder.prenet.encode_positions.alpha',
'speech_decoder_prenet.spkembs_layer.0': 'speecht5.decoder.prenet.speaker_embeds_layer',
}
MAPPING_SPEECH_DECODER_POSTNET = {
'speech_decoder_postnet.feat_out': 'speech_decoder_postnet.feat_out',
'speech_decoder_postnet.prob_out': 'speech_decoder_postnet.prob_out',
'speech_decoder_postnet.postnet.postnet.0.0': 'speech_decoder_postnet.layers.0.conv',
'speech_decoder_postnet.postnet.postnet.0.1': 'speech_decoder_postnet.layers.0.batch_norm',
'speech_decoder_postnet.postnet.postnet.1.0': 'speech_decoder_postnet.layers.1.conv',
'speech_decoder_postnet.postnet.postnet.1.1': 'speech_decoder_postnet.layers.1.batch_norm',
'speech_decoder_postnet.postnet.postnet.2.0': 'speech_decoder_postnet.layers.2.conv',
'speech_decoder_postnet.postnet.postnet.2.1': 'speech_decoder_postnet.layers.2.batch_norm',
'speech_decoder_postnet.postnet.postnet.3.0': 'speech_decoder_postnet.layers.3.conv',
'speech_decoder_postnet.postnet.postnet.3.1': 'speech_decoder_postnet.layers.3.batch_norm',
'speech_decoder_postnet.postnet.postnet.4.0': 'speech_decoder_postnet.layers.4.conv',
'speech_decoder_postnet.postnet.postnet.4.1': 'speech_decoder_postnet.layers.4.batch_norm',
}
MAPPING_TEXT_DECODER_PRENET = {
'text_decoder_prenet.embed_tokens': 'speecht5.decoder.prenet.embed_tokens',
}
MAPPING_TEXT_DECODER_POSTNET = {
'text_decoder_postnet.output_projection': 'text_decoder_postnet.lm_head',
}
MAPPING_ENCODER = {
'encoder.layers.*.self_attn.k_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj',
'encoder.layers.*.self_attn.v_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj',
'encoder.layers.*.self_attn.q_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj',
'encoder.layers.*.self_attn.out_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj',
'encoder.layers.*.self_attn_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.layer_norm',
'encoder.layers.*.fc1': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense',
'encoder.layers.*.fc2': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense',
'encoder.layers.*.final_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'speecht5.encoder.wrapped_encoder.layer_norm',
'encoder.pos_emb.pe_k': 'speecht5.encoder.wrapped_encoder.embed_positions.pe_k',
}
MAPPING_DECODER = {
'decoder.layers.*.self_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj',
'decoder.layers.*.self_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj',
'decoder.layers.*.self_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj',
'decoder.layers.*.self_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj',
'decoder.layers.*.self_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm',
'decoder.layers.*.encoder_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj',
'decoder.layers.*.encoder_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj',
'decoder.layers.*.encoder_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj',
'decoder.layers.*.encoder_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj',
'decoder.layers.*.encoder_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm',
'decoder.layers.*.fc1': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense',
'decoder.layers.*.fc2': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense',
'decoder.layers.*.final_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm',
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
'encoder.version',
'encoder.layers.*.norm_k.weight',
'encoder.layers.*.norm_k.bias',
'decoder.version',
'decoder.layers.*.norm_k.weight',
'decoder.layers.*.norm_k.bias',
'decoder.pos_emb.pe_k',
'speech_encoder_prenet.embed_positions._float_tensor',
'text_decoder_prenet.embed_positions._float_tensor',
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
'encoder.proj',
'text_encoder_prenet.*',
'speech_decoder_prenet.*',
'speech_decoder_postnet.*',
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
'encoder.proj',
'speech_encoder_prenet.*',
'text_decoder_prenet.*',
'text_decoder_postnet.*',
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
'encoder.proj',
'text_encoder_prenet.*',
'text_decoder_prenet.*',
'text_decoder_postnet.*',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
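# Hand-checked examples of the matching rules above:
#   should_ignore("encoder.proj.weight", ["encoder.proj"])                    -> True  (plain substring)
#   should_ignore("text_decoder_prenet.embed", ["text_decoder_prenet.*"])     -> True  (trailing ".*" prefix rule)
#   should_ignore("decoder.layers.0.fc1", ["encoder.layers.*.norm_k.weight"]) -> False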
def recursively_load_weights(fairseq_dict, hf_model, task):
    unused_weights = []

    if task == "s2t":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}")

    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_encoder,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix

                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speecht5_checkpoint(
    task,
    checkpoint_path,
    pytorch_dump_folder_path,
    config_path=None,
    vocab_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5Config.from_pretrained(config_path)
    else:
        config = SpeechT5Config()

    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechT5ForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechT5ForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechT5ForSpeechToSpeech(config)
    else:
        raise ValueError(f"Unknown task name: {task}")

    if vocab_path:
        tokenizer = SpeechT5Tokenizer(vocab_path, model_max_length=config.max_text_positions)

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

    feature_extractor = SpeechT5FeatureExtractor()
    processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)

    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--task""",
default="""s2t""",
type=str,
help="""Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--vocab_path""", default=None, type=str, help="""Path to SentencePiece model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
    args = parser.parse_args()
    convert_speecht5_checkpoint(
        args.task,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.vocab_path,
        args.push_to_hub,
    )
| 358 |
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
| 10 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        num_frames=10,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}

        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class VivitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = VivitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = VivitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_call_pil(self) -> None:
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL videos
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], Image.Image)
        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self) -> None:
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], np.ndarray)
        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self) -> None:
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], torch.Tensor)
        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
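
# Illustrative usage (added sketch, not part of the test suite; assumes
# `transformers`, `torch`, and `numpy` are installed): the processor resizes each
# frame to the configured shortest edge, center-crops, and normalizes.
#
#   processor = VivitImageProcessor(size={"shortest_edge": 18}, crop_size={"height": 18, "width": 18})
#   video = [np.random.randint(0, 256, (3, 30, 40), dtype=np.uint8) for _ in range(10)]
#   pixel_values = processor(video, return_tensors="pt").pixel_values  # shape (1, 10, 3, 18, 18)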
| 359 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
"""return_dict""": False,
"""output_hidden_states""": True,
"""output_attentions""": True,
"""torchscript""": True,
"""torch_dtype""": """float16""",
"""use_bfloat16""": True,
"""tf_legacy_loss""": True,
"""pruned_heads""": {"""a""": 1},
"""tie_word_embeddings""": False,
"""is_decoder""": True,
"""cross_attention_hidden_size""": 1_28,
"""add_cross_attention""": True,
"""tie_encoder_decoder""": True,
"""max_length""": 50,
"""min_length""": 3,
"""do_sample""": True,
"""early_stopping""": True,
"""num_beams""": 3,
"""num_beam_groups""": 3,
"""diversity_penalty""": 0.5,
"""temperature""": 2.0,
"""top_k""": 10,
"""top_p""": 0.7,
"""typical_p""": 0.2,
"""repetition_penalty""": 0.8,
"""length_penalty""": 0.8,
"""no_repeat_ngram_size""": 5,
"""encoder_no_repeat_ngram_size""": 5,
"""bad_words_ids""": [1, 2, 3],
"""num_return_sequences""": 3,
"""chunk_size_feed_forward""": 5,
"""output_scores""": True,
"""return_dict_in_generate""": True,
"""forced_bos_token_id""": 2,
"""forced_eos_token_id""": 3,
"""remove_invalid_values""": True,
"""architectures""": ["""BertModel"""],
"""finetuning_task""": """translation""",
"""id2label""": {0: """label"""},
"""label2id""": {"""label""": """0"""},
"""tokenizer_class""": """BertTokenizerFast""",
"""prefix""": """prefix""",
"""bos_token_id""": 6,
"""pad_token_id""": 7,
"""eos_token_id""": 8,
"""sep_token_id""": 9,
"""decoder_start_token_id""": 10,
"""exponential_decay_length_penalty""": (5, 1.01),
"""suppress_tokens""": [0, 1],
"""begin_suppress_tokens""": 2,
"""task_specific_params""": {"""translation""": """some_params"""},
"""problem_type""": """regression""",
}
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls) -> None:
        '''simple docstring'''
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
    @classmethod
    def tearDownClass(cls) -> None:
        '''simple docstring'''
try:
delete_repo(token=cls._token , repo_id="test-config" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-config-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-config" )
except HTTPError:
pass
    def test_push_to_hub(self) -> None:
        '''simple docstring'''
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37)
        config.push_to_hub("test-config", use_auth_token=self._token)
        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="test-config")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id="test-config", push_to_hub=True, use_auth_token=self._token)
        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_in_organization(self) -> None:
        '''simple docstring'''
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37)
        config.push_to_hub("valid_org/test-config-org", use_auth_token=self._token)
        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-config-org")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-config-org", push_to_hub=True, use_auth_token=self._token)
        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_dynamic_config(self) -> None:
        '''simple docstring'''
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)
        config.push_to_hub("test-dynamic-config", use_auth_token=self._token)
        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig"})
        new_config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config", trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the CustomConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, "CustomConfig")
        self.assertEqual(new_config.attribute, 42)
class ConfigTestUtils(unittest.TestCase):
    def test_config_from_string(self) -> None:
        '''simple docstring'''
        c = GPT2Config()
        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + "foo"  # str
        c.update_from_string(
            f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}")
        self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd")
        self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop")
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights")
        self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type")
    def test_config_common_kwargs_is_complete(self) -> None:
        '''simple docstring'''
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"])
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                "The following keys are set with the default values in"
                " `test_configuration_common.config_common_kwargs` pick another value for them:"
                f" {', '.join(keys_with_defaults)}.")
    def test_from_pretrained_subfolder(self) -> None:
        '''simple docstring'''
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder")
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder", subfolder="bert")
        self.assertIsNotNone(config)
    def test_cached_files_are_used_when_internet_is_down(self) -> None:
        '''simple docstring'''
        # A mock response for an HTTP request to emulate the server being down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This checks that we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_url(self) -> None:
        '''simple docstring'''
        _ = BertConfig.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json")
    def test_local_versioning(self) -> None:
        '''simple docstring'''
        configuration = AutoConfig.from_pretrained("bert-base-cased")
        configuration.configuration_files = ["config.4.0.0.json"]
        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, "config.4.0.0.json"), "w"))
            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)
            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ["config.42.0.0.json"]
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json"))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)
    def test_repo_versioning_before(self) -> None:
        '''simple docstring'''
        repo = "hf-internal-testing/test-two-configs"
        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = "v4.0.0"
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True)
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})
        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = "v3.0.0"
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
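
# Quick illustration of `update_from_string` (added sketch, not part of the test
# file): it parses comma-separated "key=value" pairs and casts each value to the
# type of the existing attribute.
#
#   c = GPT2Config()
#   c.update_from_string("n_embd=100,scale_attn_weights=False")
#   assert c.n_embd == 100 and c.scale_attn_weights is False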
| 10 | 0 |
def solution() -> str:
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]
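
# Added illustration (not part of the original solution): the same last ten digits
# via modular exponentiation, so intermediate values never exceed ten digits.
def solution_last_digits_modular(limit: int = 1000, digits: int = 10) -> str:
    mod = 10**digits
    return str(sum(pow(i, i, mod) for i in range(1, limit + 1)) % mod).zfill(digits)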
if __name__ == "__main__":
print(solution())
| 360 |
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8
KEYMAP = {
"""tab""": ord("""\t"""),
"""newline""": ord("""\r"""),
"""esc""": 27,
"""up""": 65 + ARROW_KEY_FLAG,
"""down""": 66 + ARROW_KEY_FLAG,
"""right""": 67 + ARROW_KEY_FLAG,
"""left""": 68 + ARROW_KEY_FLAG,
"""mod_int""": 91,
"""undefined""": sys.maxsize,
"""interrupt""": 3,
"""insert""": 50,
"""delete""": 51,
"""pg_up""": 53,
"""pg_down""": 54,
}
__lowerCamelCase = KEYMAP["""up"""]
__lowerCamelCase = KEYMAP["""left"""]
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
B"""\xe0H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
B"""\x00H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
B"""\xe0P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
B"""\x00P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
B"""\xe0M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
B"""\x00M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
B"""\xe0K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
B"""\x00K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
}
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    """Gets raw characters from inputs"""
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
return ch
def get_character():
    """Gets a character from the keyboard and returns the key code"""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
| 10 | 0 |
"""simple docstring"""
import numpy
# List of input, output pairs
train_data = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def UpperCamelCase ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[int]="train" ):
return calculate_hypothesis_value(lowercase__ , lowercase__ ) - output(
lowercase__ , lowercase__ )
def UpperCamelCase ( __lowerCamelCase : str ):
snake_case : int = 0
for i in range(len(lowercase__ ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def UpperCamelCase ( __lowerCamelCase : Dict , __lowerCamelCase : Dict ):
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def UpperCamelCase ( __lowerCamelCase : str , __lowerCamelCase : str ):
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def UpperCamelCase ( __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any]=m ):
snake_case : Optional[int] = 0
for i in range(lowercase__ ):
if index == -1:
summation_value += _error(lowercase__ )
else:
summation_value += _error(lowercase__ ) * train_data[i][0][index]
return summation_value
def UpperCamelCase ( __lowerCamelCase : Tuple ):
snake_case : Dict = summation_of_cost_derivative(lowercase__ , lowercase__ ) / m
return cost_derivative_value
def UpperCamelCase ( ):
global parameter_vector
# Tune these values to set a tolerance value for predicted output
snake_case : Tuple = 0.00_0002
snake_case : Optional[int] = 0
snake_case : Optional[int] = 0
while True:
j += 1
snake_case : List[Any] = [0, 0, 0, 0]
for i in range(0 , len(lowercase__ ) ):
snake_case : List[Any] = get_cost_derivative(i - 1 )
snake_case : List[str] = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
lowercase__ , lowercase__ , atol=lowercase__ , rtol=lowercase__ , ):
break
snake_case : List[Any] = temp_parameter_vector
print(("Number of iterations:", j) )
def UpperCamelCase ( ):
for i in range(len(lowercase__ ) ):
print(("Actual output value:", output(lowercase__ , "test" )) )
print(("Hypothesis output:", calculate_hypothesis_value(lowercase__ , "test" )) )
if __name__ == "__main__":
run_gradient_descent()
print("""\nTesting gradient descent for a linear hypothesis function.\n""")
test_gradient_descent()
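
# Added illustration: the update loop above can be expressed as one vectorized
# numpy step. `numpy_gradient_step` is a hypothetical helper, assuming `x` already
# carries a leading bias column of ones; it is not called by the original routine.
def numpy_gradient_step(theta, x, y, lr=LEARNING_RATE):
    predictions = x @ theta  # hypothesis values for every example at once
    gradient = x.T @ (predictions - y) / len(y)  # mean gradient over the batch
    return theta - lr * gradient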
| 361 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
__lowerCamelCase = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ["""DPTFeatureExtractor"""]
__lowerCamelCase = ["""DPTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dpt"] = [
        "DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DPTForDepthEstimation",
        "DPTForSemanticSegmentation",
        "DPTModel",
        "DPTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 10 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__lowerCamelCase = {"""configuration_swin""": ["""SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SwinConfig""", """SwinOnnxConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swin"] = [
        "SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwinForImageClassification",
        "SwinForMaskedImageModeling",
        "SwinModel",
        "SwinPreTrainedModel",
        "SwinBackbone",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_swin"] = [
        "TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSwinForImageClassification",
        "TFSwinForMaskedImageModeling",
        "TFSwinModel",
        "TFSwinPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 362 |
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
logger = logging.get_logger(__name__)
class PerceiverFeatureExtractor(PerceiverImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        '''simple docstring'''
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
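
# Usage note (added sketch): instantiating the shim emits a FutureWarning but is
# otherwise interchangeable with PerceiverImageProcessor.
#
#   import warnings
#   with warnings.catch_warnings(record=True) as caught:
#       warnings.simplefilter("always")
#       extractor = PerceiverFeatureExtractor()
#   assert any(issubclass(w.category, FutureWarning) for w in caught)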
| 10 | 0 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 5_12,
"""facebook/dpr-ctx_encoder-multiset-base""": 5_12,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-question_encoder-single-nq-base""": 5_12,
"""facebook/dpr-question_encoder-multiset-base""": 5_12,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-reader-single-nq-base""": 5_12,
"""facebook/dpr-reader-multiset-base""": 5_12,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer


class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
__lowerCamelCase = R"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(self, questions, titles: Optional[str] = None, texts: Optional[str] = None, padding: Union[bool, str] = False, truncation: Union[bool, str] = False, max_length: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_attention_mask: Optional[bool] = None, **kwargs) -> BatchEncoding:
        '''simple docstring'''
        if titles is None and texts is None:
            return super().__call__(
                questions, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs)
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions, text_pair, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs)
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles as texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(self, reader_input: BatchEncoding, reader_output: "DPRReaderOutput", num_spans: int = 16, max_answer_length: int = 64, num_spans_per_passage: int = 4) -> List[DPRSpanPrediction]:
        '''simple docstring'''
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=max_answer_length, top_spans=num_spans_per_passage)
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=doc_id, start_index=start_index, end_index=end_index, text=self.decode(sequence_ids[start_index : end_index + 1])))
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(self, start_logits: List[int], end_logits: List[int], max_answer_length: int, top_spans: int) -> List[DPRSpanPrediction]:
        '''simple docstring'''
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
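
# Usage sketch (added, assumes Hub access for the pretrained files): encode one
# question against several passages, then rank answer spans from reader logits.
#
#   tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded = tokenizer(
#       questions="What does DPR stand for?",
#       titles=["Dense Passage Retrieval", "DPR"],
#       texts=["DPR is a set of tools...", "Dense Passage Retrieval for QA..."],
#       return_tensors="pt",
#   )
#   # outputs = model(**encoded)  # a DPRReader model produces the three logit tensors
#   # best = tokenizer.decode_best_spans(encoded, outputs, num_spans=5)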
| 363 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_pix2struct": [
        "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Pix2StructConfig",
        "Pix2StructTextConfig",
        "Pix2StructVisionConfig",
    ],
    "processing_pix2struct": ["Pix2StructProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ["""Pix2StructImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pix2struct"] = [
        "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Pix2StructPreTrainedModel",
        "Pix2StructForConditionalGeneration",
        "Pix2StructVisionModel",
        "Pix2StructTextModel",
    ]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 10 | 0 |
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BenchmarkArguments:
    models: List[str] = list_field(
        default=[],
        metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        },
    )
    batch_sizes: List[int] = list_field(
        default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"}
    )
    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512],
        metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"},
    )
    inference: bool = field(
        default=True,
        metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."},
    )
    cuda: bool = field(
        default=True,
        metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."},
    )
    tpu: bool = field(
        default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."}
    )
    fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."})
    training: bool = field(default=False, metadata={"help": "Benchmark training of model"})
    verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"})
    speed: bool = field(
        default=True,
        metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."},
    )
    memory: bool = field(
        default=True,
        metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        },
    )
    trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"})
    save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"})
    log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"})
    env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"})
    multi_process: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        },
    )
    inference_time_csv_file: str = field(
        default=f"inference_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv."},
    )
    inference_memory_csv_file: str = field(
        default=f"inference_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv."},
    )
    train_time_csv_file: str = field(
        default=f"train_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv for training."},
    )
    train_memory_csv_file: str = field(
        default=f"train_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv for training."},
    )
    env_info_csv_file: str = field(
        default=f"env_info_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving environment information."},
    )
    log_filename: str = field(
        default=f"log_{round(time())}.csv",
        metadata={"help": "Log filename used if print statements are saved in log."},
    )
    repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."})
    only_pretrain_model: bool = field(
        default=False,
        metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        },
    )
    def __post_init__(self):
        '''simple docstring'''
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models.",
            FutureWarning,
        )

    def to_json_string(self):
        '''simple docstring'''
        return json.dumps(dataclasses.asdict(self), indent=2)
    @property
    def model_names(self) -> List[str]:
        '''simple docstring'''
        if len(self.models) <= 0:
            raise ValueError(
                "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
                " bert-base-cased` or `args.models = ['bert-base-cased']`.")
        return self.models
    @property
    def do_multi_processing(self) -> bool:
        '''simple docstring'''
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("Multiprocessing is currently not possible on TPU.")
            return False
        else:
            return True
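
# Minimal usage sketch (added): construct the dataclass directly and read the
# derived properties; `HfArgumentParser` can also populate it from the CLI.
#
#   args = BenchmarkArguments(models=["bert-base-uncased"], batch_sizes=[1], sequence_lengths=[8])
#   print(args.model_names, args.do_multi_processing)
#   print(args.to_json_string())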
| 364 |
def palindromic_string(input_string: str) -> str:
    """
    Manacher's algorithm, which finds the longest palindromic substring in linear time.

    >>> palindromic_string("abbbaba")
    'abbba'
    >>> palindromic_string("ababa")
    'ababa'
    """
    max_length = 0
    # if input_string is "aba" than new_input_string become "a|b|a"
    new_input_string = ""
    output_string = ""
    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]
    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741
    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]
    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1
        length[j] = 2 * k - 1
        # does this string end after the previously explored end (that is r)?
        # if yes, update r to the last index of this
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1
        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j
    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i
    return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
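
# Added cross-check (illustrative): an O(n^3) brute force to validate Manacher's
# output on small inputs; not used by the doctest run above.
def _brute_force_longest_palindrome(s: str) -> str:
    best = ""
    for i in range(len(s)):
        for j in range(i, len(s)):
            sub = s[i : j + 1]
            if sub == sub[::-1] and len(sub) > len(best):
                best = sub
    return best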
| 10 | 0 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""TsinghuaAI/CPM-Generate""": """https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model""",
}
}
class CpmTokenizer(PreTrainedTokenizer):
    """Runs pre-tokenization with Jieba segmentation tool. It is used in CPM models."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP

    def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        '''simple docstring'''
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation.")
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")
    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self) -> int:
        '''simple docstring'''
        return len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        '''simple docstring'''
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        '''simple docstring'''
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')
        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize(self, text: str) -> List[str]:
        '''simple docstring'''
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces
    def _convert_token_to_id(self, token):
        '''simple docstring'''
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        '''simple docstring'''
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        '''simple docstring'''
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)

    def _decode(self, *args, **kwargs):
        '''simple docstring'''
        text = super()._decode(*args, **kwargs)
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text
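
# Usage sketch (added, assumes a local SentencePiece model file and `jieba`
# installed): the `translator` table maps " " -> "\u2582" and "\n" -> "\u2583"
# before SentencePiece, and `_decode` above reverses that mapping afterwards.
#
#   tokenizer = CpmTokenizer("spiece.model")
#   ids = tokenizer.encode("你好 世界")
#   text = tokenizer.decode(ids)  # spaces/newlines restored by `_decode`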
| 365 |
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any]  # Is a nested dict.
PICO_TO_ANGSTROM = 0.01
@dataclasses.dataclass(frozen=True)
class Protein:
    """Protein structure representation."""

    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]

    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]

    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]

    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]

    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]

    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None

    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None

    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None

    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None
def from_proteinnet_string(proteinnet_str: str) -> Protein:
    tag_re = r"(\[[A-Z]+\]\n)"
    tags = [tag.strip() for tag in re.split(tag_re, proteinnet_str) if len(tag) > 0]
    groups: Iterator[Tuple[str, List[str]]] = zip(tags[0::2], [l.split("\n") for l in tags[1::2]])
    atoms = ["N", "CA", "C"]
    aatype = None
    atom_positions = None
    atom_mask = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            seq = g[1][0].strip()
            for i in range(len(seq)):
                if seq[i] not in residue_constants.restypes:
                    seq[i] = "X"  # FIXME: strings are immutable
            aatype = np.array(
                [residue_constants.restype_order.get(res_symbol, residue_constants.restype_num) for res_symbol in seq])
        elif "[TERTIARY]" == g[0]:
            tertiary: List[List[float]] = []
            for axis in range(3):
                tertiary.append(list(map(float, g[1][axis].split())))
            tertiary_np = np.array(tertiary)
            atom_positions = np.zeros((len(tertiary[0]) // 3, residue_constants.atom_type_num, 3)).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_positions[:, residue_constants.atom_order[atom], :] = np.transpose(tertiary_np[:, i::3])
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            mask = np.array(list(map({"-": 0, "+": 1}.get, g[1][0].strip())))
            atom_mask = np.zeros(
                (
                    len(mask),
                    residue_constants.atom_type_num,
                )).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_mask[:, residue_constants.atom_order[atom]] = 1
            atom_mask *= mask[..., None]
    assert aatype is not None
    return Protein(
        atom_positions=atom_positions, atom_mask=atom_mask, aatype=aatype, residue_index=np.arange(len(aatype)), b_factors=None)
def get_pdb_headers(prot: Protein, chain_id: int = 0) -> List[str]:
    pdb_headers: List[str] = []
    remark = prot.remark
    if remark is not None:
        pdb_headers.append(f"REMARK {remark}")
    parents = prot.parents
    parents_chain_index = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        parents = [p for i, p in zip(parents_chain_index, parents) if i == chain_id]
    if parents is None or len(parents) == 0:
        parents = ["N/A"]
    pdb_headers.append(f"PARENT {' '.join(parents)}")
    return pdb_headers
def UpperCamelCase ( __lowerCamelCase : Protein , __lowerCamelCase : str ):
snake_case : List[str] = []
snake_case : Any = pdb_str.split("\n" )
snake_case : int = prot.remark
if remark is not None:
out_pdb_lines.append(f"""REMARK {remark}""" )
snake_case : List[List[str]]
if prot.parents is not None and len(prot.parents ) > 0:
snake_case : Optional[Any] = []
if prot.parents_chain_index is not None:
snake_case : Dict[str, List[str]] = {}
for p, i in zip(prot.parents , prot.parents_chain_index ):
parent_dict.setdefault(str(i ) , [] )
parent_dict[str(i )].append(p )
snake_case : List[str] = max([int(chain_idx ) for chain_idx in parent_dict] )
for i in range(max_idx + 1 ):
parents_per_chain.append(parent_dict.get(str(i ) , ["N/A"] ) )
else:
parents_per_chain.append(list(prot.parents ) )
else:
snake_case : Optional[Any] = [["N/A"]]
def make_parent_line(__lowerCamelCase : Sequence[str] ) -> str:
return f"""PARENT {' '.join(__lowerCamelCase )}"""
out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
snake_case : List[Any] = 0
for i, l in enumerate(lines ):
if "PARENT" not in l and "REMARK" not in l:
out_pdb_lines.append(l )
if "TER" in l and "END" not in lines[i + 1]:
chain_counter += 1
if chain_counter < len(parents_per_chain ):
snake_case : int = parents_per_chain[chain_counter]
else:
snake_case : Any = ["N/A"]
out_pdb_lines.append(make_parent_line(chain_parents ) )
return "\n".join(__lowerCamelCase )
def UpperCamelCase ( __lowerCamelCase : Protein ):
snake_case : str = residue_constants.restypes + ["X"]
def res_1to3(r : int ) -> str:
return residue_constants.restype_1to3.get(restypes[r] , "UNK" )
snake_case : List[Any] = residue_constants.atom_types
snake_case : List[str] = []
snake_case : Any = prot.atom_mask
snake_case : Any = prot.aatype
snake_case : Dict = prot.atom_positions
snake_case : List[str] = prot.residue_index.astype(np.int32 )
snake_case : Dict = prot.b_factors
snake_case : Tuple = prot.chain_index
if np.any(aatype > residue_constants.restype_num ):
raise ValueError("Invalid aatypes." )
snake_case : Any = get_pdb_headers(__lowerCamelCase )
if len(headers ) > 0:
pdb_lines.extend(headers )
snake_case : Dict = aatype.shape[0]
snake_case : Tuple = 1
snake_case : Any = 0
snake_case : Union[str, Any] = string.ascii_uppercase
snake_case : int = None
# Add all atom sites.
for i in range(n ):
snake_case : List[Any] = res_1to3(aatype[i] )
for atom_name, pos, mask, b_factor in zip(atom_types , atom_positions[i] , atom_mask[i] , b_factors[i] ):
if mask < 0.5:
continue
snake_case : Any = "ATOM"
snake_case : str = atom_name if len(atom_name ) == 4 else f""" {atom_name}"""
snake_case : Optional[Any] = ""
snake_case : Dict = ""
snake_case : Optional[Any] = 1.00
snake_case : str = atom_name[0] # Protein supports only C, N, O, S, this works.
snake_case : Dict = ""
snake_case : Any = "A"
if chain_index is not None:
snake_case : str = chain_tags[chain_index[i]]
# PDB is a columnar format, every space matters here!
snake_case : List[str] = (
f"""{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"""
f"""{res_name_a:>3} {chain_tag:>1}"""
f"""{residue_index[i]:>4}{insertion_code:>1} """
f"""{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"""
f"""{occupancy:>6.2f}{b_factor:>6.2f} """
f"""{element:>2}{charge:>2}"""
)
pdb_lines.append(__lowerCamelCase )
atom_index += 1
snake_case : Optional[int] = i == n - 1
if chain_index is not None:
if i != n - 1 and chain_index[i + 1] != prev_chain_index:
snake_case : Any = True
snake_case : Tuple = chain_index[i + 1]
if should_terminate:
# Close the chain.
snake_case : Optional[Any] = "TER"
snake_case : Optional[int] = (
f"""{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}"""
)
pdb_lines.append(__lowerCamelCase )
atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
pdb_lines.extend(get_pdb_headers(__lowerCamelCase , prev_chain_index ) )
pdb_lines.append("END" )
pdb_lines.append("" )
return "\n".join(__lowerCamelCase )
def UpperCamelCase ( __lowerCamelCase : Protein ):
return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def UpperCamelCase ( __lowerCamelCase : FeatureDict , __lowerCamelCase : ModelOutput , __lowerCamelCase : Optional[np.ndarray] = None , __lowerCamelCase : Optional[np.ndarray] = None , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : Optional[Sequence[str]] = None , __lowerCamelCase : Optional[Sequence[int]] = None , ):
return Protein(
aatype=features["aatype"] , atom_positions=result["final_atom_positions"] , atom_mask=result["final_atom_mask"] , residue_index=features["residue_index"] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"] ) , chain_index=__lowerCamelCase , remark=__lowerCamelCase , parents=__lowerCamelCase , parents_chain_index=__lowerCamelCase , )
| 10 | 0 |
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class UpperCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
A__ : str = BarthezTokenizer
A__ : List[Any] = BarthezTokenizerFast
A__ : Optional[int] = True
A__ : Optional[int] = True
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Dict:
'''simple docstring'''
super().setUp()
snake_case : Tuple = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez" )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=lowerCAmelCase__ )
snake_case : List[str] = tokenizer
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Optional[int]:
'''simple docstring'''
snake_case : Tuple = "<pad>"
snake_case : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__ ) , lowerCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__ ) , lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Optional[int]:
'''simple docstring'''
snake_case : List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "<mask>" )
self.assertEqual(len(vocab_keys ) , 10_11_22 )
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> Dict:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 10_11_22 )
@require_torch
def _SCREAMING_SNAKE_CASE (self : Any ) -> int:
'''simple docstring'''
snake_case : int = ["A long paragraph for summarization.", "Another paragraph for summarization."]
snake_case : Optional[int] = [0, 57, 30_18, 7_03_07, 91, 2]
snake_case : int = self.tokenizer(
lowerCAmelCase__ , max_length=len(lowerCAmelCase__ ) , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , return_tensors="pt" )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
snake_case : str = batch.input_ids.tolist()[0]
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE (self : str ) -> Optional[Any]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
snake_case : Optional[int] = self.get_tokenizer()
snake_case : Optional[int] = self.get_rust_tokenizer()
snake_case : Tuple = "I was born in 92000, and this is falsé."
snake_case : Dict = tokenizer.tokenize(lowerCAmelCase__ )
snake_case : Union[str, Any] = rust_tokenizer.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case : Dict = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
snake_case : Union[str, Any] = rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case : Optional[Any] = self.get_rust_tokenizer()
snake_case : Optional[Any] = tokenizer.encode(lowerCAmelCase__ )
snake_case : Optional[int] = rust_tokenizer.encode(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def _SCREAMING_SNAKE_CASE (self : int ) -> int:
'''simple docstring'''
snake_case : Optional[Any] = {"input_ids": [[0, 4_90, 1_43_28, 45_07, 3_54, 47, 4_36_69, 95, 25, 7_81_17, 2_02_15, 1_97_79, 1_90, 22, 4_00, 4, 3_53_43, 8_03_10, 6_03, 86, 2_49_37, 1_05, 3_34_38, 9_47_62, 1_96, 3_96_42, 7, 15, 1_59_33, 1_73, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_05_34, 87, 25, 66, 33_58, 1_96, 5_52_89, 8, 8_29_61, 81, 22_04, 7_52_03, 7, 15, 7_63, 1_29_56, 2_16, 1_78, 1_43_28, 95_95, 13_77, 6_96_93, 7, 4_48, 7_10_21, 1_96, 1_81_06, 14_37, 1_39_74, 1_08, 90_83, 4, 4_93_15, 7, 39, 86, 13_26, 27_93, 4_63_33, 4, 4_48, 1_96, 7_45_88, 7, 4_93_15, 7, 39, 21, 8_22, 3_84_70, 74, 21, 6_67_23, 6_24_80, 8, 2_20_50, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# moussaKam/mbarthez is a French model, so we also use French texts.
snake_case : Tuple = [
"Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
"utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
"À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
"pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
"telles que la traduction et la synthèse de texte.",
]
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__ , model_name="moussaKam/mbarthez" , revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6" , sequences=lowerCAmelCase__ , )
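# Stand-alone sketch of the behaviour exercised above (it downloads the real
# checkpoint, so it is left as a comment):
#   from transformers import BarthezTokenizerFast
#   tok = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
#   tok("A long paragraph for summarization.").input_ids
#   # -> [0, 57, 3018, 70307, 91, 2], matching the expected ids in the batch test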
| 366 |
from __future__ import annotations
__lowerCamelCase = {
"""A""": ["""B""", """C""", """E"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F""", """G"""],
"""D""": ["""B"""],
"""E""": ["""A""", """B""", """D"""],
"""F""": ["""C"""],
"""G""": ["""C"""],
}
class Graph :
def __init__(self : Tuple , snake_case__ : dict[str, list[str]] , snake_case__ : str ) -> None:
'''simple docstring'''
snake_case : str = graph
# mapping node to its parent in resulting breadth first tree
snake_case : dict[str, str | None] = {}
snake_case : Union[str, Any] = source_vertex
def breath_first_search(self : Union[str, Any] ) -> None:
'''simple docstring'''
snake_case : Any = {self.source_vertex}
snake_case : str = None
snake_case : List[str] = [self.source_vertex] # first in first out queue
while queue:
snake_case : List[Any] = queue.pop(0 )
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(snake_case__ )
snake_case : Any = vertex
queue.append(snake_case__ )
def shortest_path(self : str , snake_case__ : str ) -> str:
'''simple docstring'''
if target_vertex == self.source_vertex:
return self.source_vertex
snake_case : str = self.parent.get(snake_case__ )
if target_vertex_parent is None:
snake_case : Optional[Any] = (
f"""No path from vertex: {self.source_vertex} to vertex: {target_vertex}"""
)
raise ValueError(snake_case__ )
return self.shortest_path(snake_case__ ) + f"""->{target_vertex}"""
if __name__ == "__main__":
__lowerCamelCase = Graph(graph, """G""")
g.breath_first_search()
print(g.shortest_path("""D"""))
print(g.shortest_path("""G"""))
print(g.shortest_path("""Foo"""))
| 10 | 0 |
import os
def UpperCamelCase ( ):
with open(os.path.dirname(__file__ ) + "/p022_names.txt" ) as file:
snake_case : str = str(file.readlines()[0] )
snake_case : Optional[int] = names.replace("\"" , "" ).split("," )
names.sort()
snake_case : Optional[Any] = 0
snake_case : int = 0
for i, name in enumerate(names ):
for letter in name:
name_score += ord(letter ) - 64
total_score += (i + 1) * name_score
snake_case : List[str] = 0
return total_score
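# Worked example from the problem statement (independent of the data file):
# "COLIN" scores 3 + 15 + 12 + 9 + 14 = 53 and, at 1-indexed position 938 in
# the alphabetically sorted list, contributes 938 * 53 = 49714 to the total.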
if __name__ == "__main__":
print(solution())
| 367 |
from __future__ import annotations
def UpperCamelCase ( __lowerCamelCase : list[int] ):
snake_case : Optional[int] = len(__lowerCamelCase ) // 2
# choose the middle 3 elements
snake_case : str = lst[m - 1 : m + 2]
# if middle element is peak
if three[1] > three[0] and three[1] > three[2]:
return three[1]
# if increasing, recurse on right
elif three[0] < three[2]:
if len(lst[:m] ) == 2:
m -= 1
return peak(lst[m:] )
# decreasing
else:
if len(lst[:m] ) == 2:
m += 1
return peak(lst[:m] )
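# Doctest-style examples (assuming the intended bindings of `m` and `three`
# above; the input must be unimodal, i.e. rising then falling):
#   peak([1, 2, 3, 4, 5, 4, 3, 2, 1]) -> 5
#   peak([1, 10, 9, 8, 7, 6]) -> 10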
if __name__ == "__main__":
import doctest
doctest.testmod()
| 10 | 0 |
from typing import Any
class Node :
def __init__(self : Any , snake_case__ : Tuple ) -> Tuple:
'''simple docstring'''
snake_case : Dict = data
snake_case : str = None
class LinkedList :
def __init__(self : Tuple ) -> List[Any]:
'''simple docstring'''
snake_case : Union[str, Any] = None
def print_list(self : str ) -> Optional[int]:
'''simple docstring'''
snake_case : int = self.head
while temp is not None:
print(temp.data , end=" " )
snake_case : Optional[Any] = temp.next
print()
def push(self : List[Any] , snake_case__ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
snake_case : int = Node(snake_case__ )
snake_case : Any = self.head
snake_case : Any = new_node
def swap_nodes(self : Optional[int] , node_data_a : Dict , node_data_b : Union[str, Any] ) -> str:
'''simple docstring'''
if node_data_a == node_data_b:
return
else:
snake_case : str = self.head
while node_a is not None and node_a.data != node_data_a:
snake_case : Optional[int] = node_a.next
snake_case : Union[str, Any] = self.head
while node_b is not None and node_b.data != node_data_b:
snake_case : List[str] = node_b.next
if node_a is None or node_b is None:
return
node_a.data, node_b.data = node_b.data, node_a.data
if __name__ == "__main__":
__lowerCamelCase = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print("""After swapping""")
ll.print_list()
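# Expected console output (push() prepends, so the loop above builds 1..5,
# and the swap then exchanges the payloads of nodes 1 and 4):
#   1 2 3 4 5
#   After swapping
#   4 2 3 1 5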
| 368 |
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
__lowerCamelCase = """."""
if __name__ == "__main__":
__lowerCamelCase = os.path.join(REPO_PATH, """utils/documentation_tests.txt""")
__lowerCamelCase = []
__lowerCamelCase = []
with open(doctest_file_path) as fp:
for line in fp:
__lowerCamelCase = line.strip()
__lowerCamelCase = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
__lowerCamelCase = """\n""".join(non_existent_paths)
raise ValueError(F'`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}')
if all_paths != sorted(all_paths):
raise ValueError("""Files in `utils/documentation_tests.txt` are not in alphabetical order.""")
| 10 | 0 |
def UpperCamelCase ( __lowerCamelCase : list[list[int]] , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : set ):
snake_case , snake_case : Any = len(grid ), len(grid[0] )
if (
min(row , col ) < 0
or row == row_length
or col == col_length
or (row, col) in visit
or grid[row][col] == 1
):
return 0
if row == row_length - 1 and col == col_length - 1:
return 1
visit.add((row, col) )
snake_case : List[Any] = 0
count += depth_first_search(grid , row + 1 , col , visit )
count += depth_first_search(grid , row - 1 , col , visit )
count += depth_first_search(grid , row , col + 1 , visit )
count += depth_first_search(grid , row , col - 1 , visit )
visit.remove((row, col) )
return count
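# Doctest-style example (toy 2x2 open grid, assuming the intended bindings):
#   depth_first_search([[0, 0], [0, 0]], 0, 0, set()) -> 2
# i.e. the two simple paths right-then-down and down-then-right.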
if __name__ == "__main__":
import doctest
doctest.testmod()
| 369 |
import fire
from utils import calculate_rouge, save_json
def UpperCamelCase ( __lowerCamelCase : Dict , __lowerCamelCase : Dict , __lowerCamelCase : Tuple=None , **__lowerCamelCase : Tuple ):
snake_case : Optional[Any] = [x.strip() for x in open(__lowerCamelCase ).readlines()]
snake_case : Union[str, Any] = [x.strip() for x in open(__lowerCamelCase ).readlines()][: len(__lowerCamelCase )]
snake_case : List[Any] = calculate_rouge(__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase )
if save_path is not None:
save_json(__lowerCamelCase , __lowerCamelCase , indent=__lowerCamelCase )
return metrics # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
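# Hypothetical invocation via fire (the file names are placeholders):
#   python <this_script>.py predictions.txt references.txt --save_path rouge.json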
| 10 | 0 |
import os
import zipfile
import pytest
from datasets.utils.extract import (
Bzip2Extractor,
Extractor,
GzipExtractor,
Lz4Extractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lz4, require_py7zr, require_zstandard
@pytest.mark.parametrize(
"compression_format, is_archive" , [
("7z", True),
("bz2", False),
("gzip", False),
("lz4", False),
("tar", True),
("xz", False),
("zip", True),
("zstd", False),
] , )
def UpperCamelCase ( __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : int , __lowerCamelCase : Tuple , __lowerCamelCase : str , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Tuple , ):
snake_case : Union[str, Any] = {
"""7z""": (seven_zip_file, SevenZipExtractor),
"""bz2""": (bza_file, BzipaExtractor),
"""gzip""": (gz_file, GzipExtractor),
"""lz4""": (lza_file, LzaExtractor),
"""tar""": (tar_file, TarExtractor),
"""xz""": (xz_file, XzExtractor),
"""zip""": (zip_file, ZipExtractor),
"""zstd""": (zstd_file, ZstdExtractor),
}
snake_case : int = input_paths_and_base_extractors[compression_format]
if input_path is None:
snake_case : Optional[Any] = f"""for \'{compression_format}\' compression_format, """
if compression_format == "7z":
reason += require_py7zr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lz4.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(reason )
assert base_extractor.is_extractable(input_path )
snake_case : Union[str, Any] = tmp_path / ("""extracted""" if is_archive else """extracted.txt""")
base_extractor.extract(input_path , output_path )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
snake_case : Optional[int] = file_path.read_text(encoding="utf-8" )
else:
snake_case : Optional[Any] = output_path.read_text(encoding="utf-8" )
snake_case : int = text_file.read_text(encoding="utf-8" )
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
"compression_format, is_archive" , [
("7z", True),
("bz2", False),
("gzip", False),
("lz4", False),
("tar", True),
("xz", False),
("zip", True),
("zstd", False),
] , )
def UpperCamelCase ( __lowerCamelCase : Optional[int] , __lowerCamelCase : Any , __lowerCamelCase : Tuple , __lowerCamelCase : Any , __lowerCamelCase : int , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : int , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict , ):
snake_case : str = {
"""7z""": seven_zip_file,
"""bz2""": bza_file,
"""gzip""": gz_file,
"""lz4""": lza_file,
"""tar""": tar_file,
"""xz""": xz_file,
"""zip""": zip_file,
"""zstd""": zstd_file,
}
snake_case : List[Any] = input_paths[compression_format]
if input_path is None:
snake_case : Any = f"""for \'{compression_format}\' compression_format, """
if compression_format == "7z":
reason += require_py7zr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lz4.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(reason )
snake_case : str = Extractor.infer_extractor_format(input_path )
assert extractor_format is not None
snake_case : Optional[Any] = tmp_path / ("""extracted""" if is_archive else """extracted.txt""")
Extractor.extract(input_path , output_path , extractor_format )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
snake_case : Dict = file_path.read_text(encoding="utf-8" )
else:
snake_case : List[Any] = output_path.read_text(encoding="utf-8" )
snake_case : Optional[int] = text_file.read_text(encoding="utf-8" )
assert extracted_file_content == expected_file_content
@pytest.fixture
def UpperCamelCase ( __lowerCamelCase : int , __lowerCamelCase : Any ):
import tarfile
snake_case : List[Any] = tmp_path / """data_dot_dot"""
directory.mkdir()
snake_case : Any = directory / """tar_file_with_dot_dot.tar"""
with tarfile.TarFile(path , "w" ) as f:
f.add(text_file , arcname=os.path.join(".." , text_file.name ) )
return path
@pytest.fixture
def UpperCamelCase ( __lowerCamelCase : Optional[int] ):
import tarfile
snake_case : Dict = tmp_path / """data_sym_link"""
directory.mkdir()
snake_case : List[Any] = directory / """tar_file_with_sym_link.tar"""
os.symlink(".." , directory / "subdir" , target_is_directory=UpperCAmelCase__ )
with tarfile.TarFile(UpperCAmelCase__ , "w" ) as f:
f.add(str(directory / "subdir" ) , arcname="subdir" ) # str required by os.readlink on Windows and Python < 3.8
return path
@pytest.mark.parametrize(
"insecure_tar_file, error_log" , [("tar_file_with_dot_dot", "illegal path"), ("tar_file_with_sym_link", "Symlink")] , )
def UpperCamelCase ( __lowerCamelCase : Tuple , __lowerCamelCase : int , __lowerCamelCase : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Any , __lowerCamelCase : Union[str, Any] ):
snake_case : List[Any] = {
"""tar_file_with_dot_dot""": tar_file_with_dot_dot,
"""tar_file_with_sym_link""": tar_file_with_sym_link,
}
snake_case : Union[str, Any] = insecure_tar_files[insecure_tar_file]
snake_case : List[str] = tmp_path / """extracted"""
TarExtractor.extract(UpperCAmelCase__ , UpperCAmelCase__ )
assert caplog.text
for record in caplog.records:
assert record.levelname == "ERROR"
assert error_log in record.msg
def UpperCamelCase ( __lowerCamelCase : str ):
# We should have fewer false positives than zipfile.is_zipfile
# We do that by checking only the magic number
snake_case : List[str] = tmpdir / """not_a_zip_file"""
# From: https://github.com/python/cpython/pull/5053
snake_case : Union[str, Any] = (
b"""\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"""
b"""\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"""
b"""DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"""
b"""\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"""
)
with not_a_zip_file.open("wb" ) as f:
f.write(UpperCAmelCase__ )
assert zipfile.is_zipfile(str(not_a_zip_file ) ) # is a false positive for `zipfile`
assert not ZipExtractor.is_extractable(not_a_zip_file ) # but we're right
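# Background for the check above: a real ZIP archive starts with the local
# file header magic b"PK\x03\x04"; the crafted PNG merely embeds the
# end-of-central-directory magic b"PK\x05\x06", which is enough to satisfy
# `zipfile.is_zipfile` but not a magic-number check at the start of the file.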
| 370 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""encoder.layer_norm_for_extract""": """layer_norm_for_extract""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""label_embs_concat""": """label_embeddings_concat""",
"""mask_emb""": """masked_spec_embed""",
"""spk_proj""": """speaker_proj""",
}
__lowerCamelCase = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""label_embeddings_concat""",
"""speaker_proj""",
"""layer_norm_for_extract""",
]
def UpperCamelCase ( __lowerCamelCase : Optional[int] , __lowerCamelCase : Any , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any ):
for attribute in key.split("." ):
snake_case : Tuple = getattr(__lowerCamelCase , __lowerCamelCase )
if weight_type is not None:
snake_case : int = getattr(__lowerCamelCase , __lowerCamelCase ).shape
else:
snake_case : Dict = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
snake_case : Dict = value
elif weight_type == "weight_g":
snake_case : Optional[int] = value
elif weight_type == "weight_v":
snake_case : Optional[int] = value
elif weight_type == "bias":
snake_case : Tuple = value
else:
snake_case : Optional[int] = value
logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def UpperCamelCase ( __lowerCamelCase : int , __lowerCamelCase : List[str] ):
snake_case : int = []
snake_case : List[Any] = fairseq_model.state_dict()
snake_case : int = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
snake_case : List[str] = False
if "conv_layers" in name:
load_conv_layer(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , hf_model.config.feat_extract_norm == "group" , )
snake_case : str = True
else:
for key, mapped_key in MAPPING.items():
snake_case : Tuple = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split("." )[:-1] ) != key):
# special case since naming is very similar
continue
snake_case : Tuple = True
if "*" in mapped_key:
snake_case : Union[str, Any] = name.split(key )[0].split("." )[-2]
snake_case : Any = mapped_key.replace("*" , layer_index )
if "weight_g" in name:
snake_case : Optional[int] = "weight_g"
elif "weight_v" in name:
snake_case : Tuple = "weight_v"
elif "bias" in name:
snake_case : Dict = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
snake_case : str = "weight"
else:
snake_case : str = None
set_recursively(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
continue
if not is_used:
unused_weights.append(name )
logger.warning(f"""Unused weights: {unused_weights}""" )
def UpperCamelCase ( __lowerCamelCase : Any , __lowerCamelCase : Any , __lowerCamelCase : Tuple , __lowerCamelCase : List[str] , __lowerCamelCase : Any ):
snake_case : str = full_name.split("conv_layers." )[-1]
snake_case : int = name.split("." )
snake_case : Optional[int] = int(items[0] )
snake_case : Dict = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
snake_case : Union[str, Any] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
snake_case : List[str] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" )
snake_case : Dict = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" )
snake_case : Optional[Any] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(full_name )
@torch.no_grad()
def UpperCamelCase ( __lowerCamelCase : Tuple , __lowerCamelCase : Dict , __lowerCamelCase : List[Any]=None , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Dict=True ):
if config_path is not None:
snake_case : str = UniSpeechSatConfig.from_pretrained(__lowerCamelCase )
else:
snake_case : str = UniSpeechSatConfig()
snake_case : Tuple = ""
if is_finetuned:
snake_case : Tuple = UniSpeechSatForCTC(__lowerCamelCase )
else:
snake_case : List[Any] = UniSpeechSatForPreTraining(__lowerCamelCase )
snake_case , snake_case , snake_case : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
snake_case : Dict = model[0].eval()
recursively_load_weights(model , hf_wavavec )
hf_wavavec.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
__lowerCamelCase = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
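# Hypothetical invocation (all paths are placeholders):
#   python convert_unispeech_sat_checkpoint.py --checkpoint_path ./checkpoint.pt \
#       --dict_path ./dict --pytorch_dump_folder_path ./unispeech-sat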
| 10 | 0 |
def UpperCamelCase ( __lowerCamelCase : list ):
if len(lst ) <= 1:
return lst
snake_case : int = 1
while i < len(lst ):
if lst[i - 1] <= lst[i]:
i += 1
else:
lst[i - 1], lst[i] = lst[i], lst[i - 1]
i -= 1
if i == 0:
snake_case : Optional[int] = 1
return lst
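# Doctest-style examples of the sort above:
#   gnome_sort([0, 5, 3, 2, 2]) -> [0, 2, 2, 3, 5]
#   gnome_sort([]) -> []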
if __name__ == "__main__":
__lowerCamelCase = input("""Enter numbers separated by a comma:\n""").strip()
__lowerCamelCase = [int(item) for item in user_input.split(""",""")]
print(gnome_sort(unsorted))
| 371 |
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = """▁"""
__lowerCamelCase = {"""vocab_file""": """prophetnet.tokenizer"""}
__lowerCamelCase = {
"""vocab_file""": {
"""microsoft/xprophetnet-large-wiki100-cased""": (
"""https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"""
),
}
}
__lowerCamelCase = {
"""microsoft/xprophetnet-large-wiki100-cased""": {"""do_lower_case""": False},
}
__lowerCamelCase = {
"""microsoft/xprophetnet-large-wiki100-cased""": 5_12,
}
def UpperCamelCase ( __lowerCamelCase : Dict ):
snake_case : Dict = collections.OrderedDict()
with open(__lowerCamelCase , "r" , encoding="utf-8" ) as reader:
snake_case : Any = reader.readlines()
for index, token in enumerate(__lowerCamelCase ):
snake_case : List[Any] = token.rstrip("\n" )
snake_case : int = index
return vocab
class UpperCAmelCase ( PreTrainedTokenizer ):
A__ : Tuple = VOCAB_FILES_NAMES
A__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
A__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : int = ["input_ids", "attention_mask"]
def __init__(self : Any , snake_case__ : Dict , snake_case__ : List[Any]="[SEP]" , snake_case__ : Optional[int]="[SEP]" , snake_case__ : Union[str, Any]="[SEP]" , snake_case__ : List[Any]="[UNK]" , snake_case__ : List[str]="[PAD]" , snake_case__ : List[str]="[CLS]" , snake_case__ : List[Any]="[MASK]" , snake_case__ : Optional[Dict[str, Any]] = None , **snake_case__ : List[str] , ) -> None:
'''simple docstring'''
snake_case : Dict = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , cls_token=snake_case__ , mask_token=snake_case__ , sp_model_kwargs=self.sp_model_kwargs , **snake_case__ , )
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece" )
raise
snake_case : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(snake_case__ ) )
snake_case : Dict = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
snake_case : List[Any] = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
for i in range(10 ):
snake_case : Dict = f"""[unused{i}]"""
snake_case : List[str] = 5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
snake_case : Dict = 12
snake_case : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(snake_case__ )
def __getstate__(self : str ) -> Union[str, Any]:
'''simple docstring'''
snake_case : str = self.__dict__.copy()
snake_case : Tuple = None
return state
def __setstate__(self : str , snake_case__ : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
snake_case : Union[str, Any] = d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece" )
raise
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
snake_case : Dict = {}
snake_case : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
if token_ids_a is None:
return ([0] * len(snake_case__ )) + [1]
return ([0] * len(snake_case__ )) + [1] + ([0] * len(snake_case__ )) + [1]
def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
snake_case : List[str] = [self.sep_token_id]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0]
return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _SCREAMING_SNAKE_CASE (self : Any ) -> int:
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset
def _SCREAMING_SNAKE_CASE (self : int ) -> Any:
'''simple docstring'''
snake_case : List[str] = {self.convert_ids_to_tokens(snake_case__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : str ) -> str:
'''simple docstring'''
return self.sp_model.encode(snake_case__ , out_type=snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : Optional[int] ) -> Any:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
snake_case : Optional[Any] = self.sp_model.PieceToId(snake_case__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : Optional[int] ) -> int:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] , snake_case__ : Dict ) -> List[Any]:
'''simple docstring'''
snake_case : Dict = "".join(snake_case__ ).replace(snake_case__ , " " ).strip()
return out_string
def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : str , snake_case__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(snake_case__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case : Dict = os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case__ )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case__ , "wb" ) as fi:
snake_case : Tuple = self.sp_model.serialized_model_proto()
fi.write(snake_case__ )
return (out_vocab_file,)
def _SCREAMING_SNAKE_CASE (self : Any , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.sep_token_id]
snake_case : str = [self.sep_token_id]
return token_ids_a + sep + token_ids_a + sep
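# Quick arithmetic check (toy ids) of the alignment above: spm places the
# first real token "," at id 3; with 5 special tokens plus 10 [unused] slots
# reserved on the fairseq side, fairseq_offset = 12, so "," lands at
# 3 + 12 = 15, exactly the slot the comment in __init__ promises.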
| 10 | 0 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = ["""model.decoder.embed_positions.weights"""]
def UpperCamelCase ( __lowerCamelCase : List[str] ):
if "emb" in name:
snake_case : str = name.replace("emb" , "model.decoder.embed_tokens" )
if "transformer" in name:
snake_case : int = name.replace("transformer" , "model.decoder" )
if "cross_attention" in name:
snake_case : str = name.replace("cross_attention" , "encoder_attn" )
if "linear1" in name:
snake_case : Union[str, Any] = name.replace("linear1" , "fc1" )
if "linear2" in name:
snake_case : Any = name.replace("linear2" , "fc2" )
if "norm1" in name:
snake_case : List[str] = name.replace("norm1" , "self_attn_layer_norm" )
if "norm_cross" in name:
snake_case : List[Any] = name.replace("norm_cross" , "encoder_attn_layer_norm" )
if "norm2" in name:
snake_case : Any = name.replace("norm2" , "final_layer_norm" )
if "out_norm" in name:
snake_case : int = name.replace("out_norm" , "model.decoder.layer_norm" )
if "linears" in name:
snake_case : Optional[int] = name.replace("linears" , "lm_heads" )
if "condition_provider.conditioners.description.output_proj" in name:
snake_case : List[str] = name.replace("condition_provider.conditioners.description.output_proj" , "enc_to_dec_proj" )
return name
def UpperCamelCase ( __lowerCamelCase : str , __lowerCamelCase : Optional[Any] ):
snake_case : Dict = list(state_dict.keys() )
snake_case : List[Any] = {}
for key in keys:
snake_case : int = state_dict.pop(key )
snake_case : int = rename_keys(key )
if "in_proj_weight" in key:
# split fused qkv proj
snake_case : List[str] = val[:hidden_size, :]
snake_case : int = val[hidden_size : 2 * hidden_size, :]
snake_case : Any = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
snake_case : Optional[int] = val
else:
snake_case : List[Any] = val
return state_dict, enc_dec_proj_state_dict
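# Sketch (toy sizes) of the fused-qkv split above: with hidden_size = 4, a
# 12x4 "in_proj_weight" tensor is cut row-wise into three 4x4 blocks that
# become the q, k and v projection weights respectively.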
def UpperCamelCase ( __lowerCamelCase : Any ):
if checkpoint == "small":
# default config values
snake_case : Tuple = 1024
snake_case : List[str] = 24
snake_case : Optional[int] = 16
elif checkpoint == "medium":
snake_case : Optional[int] = 1536
snake_case : Tuple = 48
snake_case : Any = 24
elif checkpoint == "large":
snake_case : str = 2048
snake_case : Union[str, Any] = 48
snake_case : Dict = 32
else:
raise ValueError(f"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" )
snake_case : Union[str, Any] = MusicgenDecoderConfig(
hidden_size=__lowerCamelCase , ffn_dim=hidden_size * 4 , num_hidden_layers=__lowerCamelCase , num_attention_heads=__lowerCamelCase , )
return config
@torch.no_grad()
def UpperCamelCase ( __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : List[Any]=None , __lowerCamelCase : List[str]="cpu" ):
snake_case : List[Any] = MusicGen.get_pretrained(__lowerCamelCase , device=__lowerCamelCase )
snake_case : List[str] = decoder_config_from_checkpoint(__lowerCamelCase )
snake_case : int = fairseq_model.lm.state_dict()
snake_case : Tuple = rename_state_dict(
__lowerCamelCase , hidden_size=decoder_config.hidden_size )
snake_case : Optional[int] = T5EncoderModel.from_pretrained("t5-base" )
snake_case : List[Any] = EncodecModel.from_pretrained("facebook/encodec_32khz" )
snake_case : Dict = MusicgenForCausalLM(__lowerCamelCase ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
snake_case , snake_case : Dict = decoder.load_state_dict(__lowerCamelCase , strict=False )
for key in missing_keys.copy():
if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(key )
if len(__lowerCamelCase ) > 0:
raise ValueError(f"""Missing key(s) in state_dict: {missing_keys}""" )
if len(__lowerCamelCase ) > 0:
raise ValueError(f"""Unexpected key(s) in state_dict: {unexpected_keys}""" )
# init the composite model
snake_case : Optional[int] = MusicgenForConditionalGeneration(text_encoder=__lowerCamelCase , audio_encoder=__lowerCamelCase , decoder=__lowerCamelCase )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(__lowerCamelCase )
# check we can do a forward pass
snake_case : Tuple = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
snake_case : List[Any] = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
snake_case : Optional[int] = model(input_ids=__lowerCamelCase , decoder_input_ids=__lowerCamelCase ).logits
if logits.shape != (8, 1, 2048):
raise ValueError("Incorrect shape for logits" )
# now construct the processor
snake_case : Any = AutoTokenizer.from_pretrained("t5-base" )
snake_case : Dict = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" , padding_side="left" )
snake_case : str = MusicgenProcessor(feature_extractor=__lowerCamelCase , tokenizer=__lowerCamelCase )
# set the appropriate bos/pad token ids
snake_case : int = 2048
snake_case : Tuple = 2048
# set other default generation config params
snake_case : Any = int(30 * audio_encoder.config.frame_rate )
snake_case : List[Any] = True
snake_case : Tuple = 3.0
if pytorch_dump_folder is not None:
Path(pytorch_dump_folder ).mkdir(exist_ok=True )
logger.info(f"""Saving model {checkpoint} to {pytorch_dump_folder}""" )
model.save_pretrained(pytorch_dump_folder )
processor.save_pretrained(pytorch_dump_folder )
if repo_id:
logger.info(f"""Pushing model {checkpoint} to {repo_id}""" )
model.push_to_hub(repo_id )
processor.push_to_hub(repo_id )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint""",
default="""small""",
type=str,
help="""Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.""",
)
parser.add_argument(
"""--pytorch_dump_folder""",
required=True,
default=None,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
parser.add_argument(
"""--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda."""
)
__lowerCamelCase = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
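# Hypothetical invocation (the output directory is a placeholder):
#   python <this_script>.py --checkpoint small --pytorch_dump_folder ./musicgen-small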
| 350 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = """▁"""
__lowerCamelCase = {"""vocab_file""": """sentencepiece.bpe.model"""}
__lowerCamelCase = {
"""vocab_file""": {
"""facebook/xglm-564M""": """https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model""",
}
}
__lowerCamelCase = {
"""facebook/xglm-564M""": 20_48,
}
class UpperCAmelCase ( PreTrainedTokenizer ):
A__ : Any = VOCAB_FILES_NAMES
A__ : Tuple = PRETRAINED_VOCAB_FILES_MAP
A__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : Optional[Any] = ["input_ids", "attention_mask"]
def __init__(self : str , snake_case__ : Optional[Any] , snake_case__ : List[str]="<s>" , snake_case__ : Tuple="</s>" , snake_case__ : Dict="</s>" , snake_case__ : Any="<s>" , snake_case__ : str="<unk>" , snake_case__ : str="<pad>" , snake_case__ : Optional[Dict[str, Any]] = None , **snake_case__ : Any , ) -> None:
'''simple docstring'''
snake_case : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
snake_case : Optional[int] = 7
snake_case : List[str] = [f"""<madeupword{i}>""" for i in range(self.num_madeup_words )]
snake_case : Union[str, Any] = kwargs.get("additional_special_tokens" , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , pad_token=snake_case__ , sp_model_kwargs=self.sp_model_kwargs , **snake_case__ , )
snake_case : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(snake_case__ ) )
snake_case : str = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
snake_case : int = 1
# Mimic fairseq token-to-id alignment for the first 4 tokens
snake_case : Any = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
snake_case : Tuple = len(self.sp_model )
snake_case : Any = {f"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(snake_case__ )
snake_case : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__(self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
snake_case : Union[str, Any] = self.__dict__.copy()
snake_case : str = None
snake_case : Union[str, Any] = self.sp_model.serialized_model_proto()
return state
def __setstate__(self : Dict , snake_case__ : Optional[Any] ) -> List[str]:
'''simple docstring'''
snake_case : int = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
snake_case : List[str] = {}
snake_case : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
snake_case : Tuple = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
if token_ids_a is None:
return [1] + ([0] * len(snake_case__ ))
return [1] + ([0] * len(snake_case__ )) + [1, 1] + ([0] * len(snake_case__ ))
def _SCREAMING_SNAKE_CASE (self : List[Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
snake_case : List[str] = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> List[Any]:
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def _SCREAMING_SNAKE_CASE (self : int ) -> Tuple:
'''simple docstring'''
snake_case : List[str] = {self.convert_ids_to_tokens(snake_case__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _SCREAMING_SNAKE_CASE (self : List[str] , snake_case__ : str ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(snake_case__ , out_type=snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
snake_case : List[Any] = self.sp_model.PieceToId(snake_case__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _SCREAMING_SNAKE_CASE (self : Optional[Any] , snake_case__ : str ) -> int:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : Tuple ) -> int:
'''simple docstring'''
snake_case : List[Any] = "".join(snake_case__ ).replace(snake_case__ , " " ).strip()
return out_string
def _SCREAMING_SNAKE_CASE (self : List[str] , snake_case__ : str , snake_case__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(snake_case__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case : Optional[Any] = os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case__ )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case__ , "wb" ) as fi:
snake_case : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(snake_case__ )
return (out_vocab_file,)
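# Minimal numeric sketch (toy ids, no SentencePiece model needed) of the
# fairseq/spm alignment documented in __init__ above: with fairseq_offset = 1
# the four specials keep their hand-assigned slots 0..3, and every other spm
# id is shifted up by one, so spm id 3 (",") becomes fairseq id 4:
#
#   offset = 1
#   specials = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
#   def to_fairseq_id(token, spm_id):
#       if token in specials:
#           return specials[token]
#       return spm_id + offset if spm_id else specials["<unk>"]
#   to_fairseq_id(",", 3)  # -> 4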
| 10 | 0 |
from __future__ import annotations
from typing import Any
class Matrix :
def __init__(self : List[str] , snake_case__ : Optional[int] , snake_case__ : List[str] , snake_case__ : str = 0 ) -> Union[str, Any]:
'''simple docstring'''
snake_case , snake_case : str = row, column
snake_case : str = [[default_value for c in range(column )] for r in range(row )]
def __str__(self : Dict ) -> Tuple:
'''simple docstring'''
snake_case : Union[str, Any] = f"""Matrix consist of {self.row} rows and {self.column} columns\n"""
# Make string identifier
snake_case : Optional[Any] = 0
for row_vector in self.array:
for obj in row_vector:
snake_case : Union[str, Any] = max(max_element_length , len(str(obj ) ) )
snake_case : str = f"""%{max_element_length}s"""
# Make string and return
def single_line(snake_case__ : Optional[Any] ) -> str:
nonlocal string_format_identifier
snake_case : Any = '''['''
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
line += "]"
return line
s += "\n".join(single_line(a_ ) for row_vector in self.array )
return s
def __repr__(self : Optional[int] ) -> Any:
'''simple docstring'''
return str(self )
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] , snake_case__ : int ) -> Optional[Any]:
'''simple docstring'''
if not (isinstance(loc , (list, tuple) ) and len(loc ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__(self : Optional[int] , snake_case__ : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
assert self.validate_indicies(loc )
return self.array[loc[0]][loc[1]]
def __setitem__(self : Any , snake_case__ : int , snake_case__ : Optional[Any] ) -> Dict:
'''simple docstring'''
assert self.validate_indicies(loc )
snake_case : Dict = value
def __add__(self : Optional[int] , snake_case__ : Tuple ) -> Tuple:
'''simple docstring'''
assert isinstance(another , Matrix )
assert self.row == another.row and self.column == another.column
# Add
snake_case : Any = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
snake_case : List[Any] = self[r, c] + another[r, c]
return result
def __neg__(self : Optional[int] ) -> Dict:
'''simple docstring'''
snake_case : Any = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
snake_case : List[str] = -self[r, c]
return result
def __sub__(self : Tuple , snake_case__ : str ) -> int:
'''simple docstring'''
return self + (-another)
def __mul__(self : int , snake_case__ : int ) -> Optional[Any]:
'''simple docstring'''
if isinstance(another , (int, float) ): # Scalar multiplication
snake_case : str = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
snake_case : str = self[r, c] * another
return result
elif isinstance(another , Matrix ): # Matrix multiplication
assert self.column == another.row
snake_case : str = Matrix(self.row , another.column )
for r in range(self.row ):
for c in range(another.column ):
for i in range(self.column ):
result[r, c] += self[r, i] * another[i, c]
return result
else:
snake_case : str = f"""Unsupported type given for another ({type(a_ )})"""
raise TypeError(a_ )
def _SCREAMING_SNAKE_CASE (self : int ) -> Union[str, Any]:
'''simple docstring'''
snake_case : List[Any] = Matrix(self.column , self.row )
for r in range(self.row ):
for c in range(self.column ):
snake_case : Any = self[r, c]
return result
def _SCREAMING_SNAKE_CASE (self : Optional[Any] , snake_case__ : str , snake_case__ : int ) -> List[str]:
'''simple docstring'''
assert isinstance(u , Matrix ) and isinstance(v , Matrix )
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
snake_case : Tuple = v.transpose()
snake_case : Union[str, Any] = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
return None # It's not invertable
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
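# The update above is the Sherman-Morrison identity, with `self` playing the
# role of A^(-1):
#   (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u)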
# Testing
if __name__ == "__main__":
def UpperCamelCase ( ):
snake_case : Tuple = Matrix(3 , 3 , 0 )
for i in range(3 ):
ainv[i, i] = 1
print(f"""a^(-1) is {ainv}""" )
# u, v
snake_case : Union[str, Any] = Matrix(3 , 1 , 0 )
u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
snake_case : Optional[Any] = Matrix(3 , 1 , 0 )
v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
print(f"""u is {u}""" )
print(f"""v is {v}""" )
print(f"""uv^T is {u * v.transpose()}""" )
# Sherman Morrison
print(f"""(a + uv^T)^(-1) is {ainv.sherman_morrison(_snake_case , _snake_case )}""" )
def UpperCamelCase ( ):
import doctest
doctest.testmod()
testa()
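    # Illustrative cross-check (not part of the original module; assumes numpy):
    # the Sherman-Morrison identity
    #     (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u)
    # verified numerically for the same A = I, u, v used in test1 above.
    import numpy as np

    np_a_inv = np.eye(3)
    np_u = np.array([[1.0], [2.0], [-3.0]])
    np_v = np.array([[4.0], [-2.0], [5.0]])
    denominator = 1.0 + (np_v.T @ np_a_inv @ np_u).item()
    updated = np_a_inv - (np_a_inv @ np_u @ np_v.T @ np_a_inv) / denominator
    assert np.allclose(updated, np.linalg.inv(np.eye(3) + np_u @ np_v.T))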
| 351 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
__lowerCamelCase = logging.get_logger(__name__)
class UpperCAmelCase ( BaseImageProcessor ):
A__ : int = ["pixel_values"]
def __init__(self : Tuple , snake_case__ : bool = True , snake_case__ : Union[int, float] = 1 / 2_55 , snake_case__ : bool = True , snake_case__ : int = 8 , **snake_case__ : Dict , ) -> None:
'''simple docstring'''
super().__init__(**snake_case__ )
snake_case : int = do_rescale
snake_case : List[str] = rescale_factor
snake_case : Optional[Any] = do_pad
snake_case : Dict = pad_size
def _SCREAMING_SNAKE_CASE (self : Dict , snake_case__ : np.ndarray , snake_case__ : float , snake_case__ : Optional[Union[str, ChannelDimension]] = None , **snake_case__ : List[str] ) -> np.ndarray:
'''simple docstring'''
return rescale(snake_case__ , scale=snake_case__ , data_format=snake_case__ , **snake_case__ )
def _SCREAMING_SNAKE_CASE (self : List[Any] , snake_case__ : np.ndarray , snake_case__ : int , snake_case__ : Optional[Union[str, ChannelDimension]] = None ) -> Dict:
'''simple docstring'''
snake_case , snake_case : Union[str, Any] = get_image_size(snake_case__ )
snake_case : str = (old_height // size + 1) * size - old_height
snake_case : List[str] = (old_width // size + 1) * size - old_width
return pad(snake_case__ , ((0, pad_height), (0, pad_width)) , mode="symmetric" , data_format=snake_case__ )
def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : ImageInput , snake_case__ : Optional[bool] = None , snake_case__ : Optional[float] = None , snake_case__ : Optional[bool] = None , snake_case__ : Optional[int] = None , snake_case__ : Optional[Union[str, TensorType]] = None , snake_case__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **snake_case__ : List[Any] , ) -> Tuple:
'''simple docstring'''
snake_case : str = do_rescale if do_rescale is not None else self.do_rescale
snake_case : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case : Optional[Any] = do_pad if do_pad is not None else self.do_pad
snake_case : Dict = pad_size if pad_size is not None else self.pad_size
snake_case : Union[str, Any] = make_list_of_images(snake_case__ )
if not valid_images(snake_case__ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
# All transformations expect numpy arrays.
snake_case : str = [to_numpy_array(snake_case__ ) for image in images]
if do_rescale:
snake_case : str = [self.rescale(image=snake_case__ , scale=snake_case__ ) for image in images]
if do_pad:
snake_case : List[Any] = [self.pad(snake_case__ , size=snake_case__ ) for image in images]
snake_case : Union[str, Any] = [to_channel_dimension_format(snake_case__ , snake_case__ ) for image in images]
snake_case : Optional[Any] = {"pixel_values": images}
return BatchFeature(data=snake_case__ , tensor_type=snake_case__ )
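# A minimal standalone sketch of the pad-to-multiple logic used by the pad
# method above, assuming a plain 2D (height, width) numpy array
# (`_pad_to_multiple_sketch` is illustrative and not part of the transformers
# API). Note that the formula pads a full extra `size` pixels when a side is
# already an exact multiple.
def _pad_to_multiple_sketch(image: np.ndarray, size: int = 8) -> np.ndarray:
    old_height, old_width = image.shape[:2]
    pad_height = (old_height // size + 1) * size - old_height
    pad_width = (old_width // size + 1) * size - old_width
    # e.g. a (21, 13) input with size=8 becomes (24, 16)
    return np.pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric")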
| 10 | 0 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    # Split the dataset bundle into (features, target)
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    # Load the iris dataset and split it into train/test sets
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )
    names = iris["target_names"]
    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)
    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap="Blues",
        normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
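    # Hypothetical follow-up (not part of the original script): with the pieces
    # built inside main(), raw accuracy on the held-out split would be
    #
    #     predictions = xgboost_classifier.predict(x_test)
    #     accuracy = (predictions == y_test).mean()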
| 352 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int
) -> np.ndarray:
    # prepare kernel
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degrees to radians
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py
            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor
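# For reference (illustrative, not from the original module): the evenness
# check above forces an odd kernel, so a requested ksize of 10 produces an
# 11x11 kernel:
#
#     gabor_filter_kernel(10, 8, 0, 10, 0, 0).shape  # -> (11, 11)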
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
__lowerCamelCase = imread("""../image_data/lena.jpg""")
# turn image in gray scale value
__lowerCamelCase = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
__lowerCamelCase = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 1_20, 1_50]:
__lowerCamelCase = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
__lowerCamelCase = out / out.max() * 2_55
__lowerCamelCase = out.astype(np.uinta)
imshow("""Original""", gray)
imshow("""Gabor filter with 20x20 mask and 6 directions""", out)
waitKey(0)
| 10 | 0 |
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCAmelCase ( PipelineTesterMixin ,unittest.TestCase ):
A__ : Union[str, Any] = ConsistencyModelPipeline
A__ : Union[str, Any] = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
A__ : Optional[int] = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
# Override required_optional_params to remove num_images_per_prompt
A__ : str = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
] )
@property
def _SCREAMING_SNAKE_CASE (self : Dict ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Dict = UNetaDModel.from_pretrained(
"diffusers/consistency-models-test" , subfolder="test_unet" , )
return unet
@property
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> str:
'''simple docstring'''
snake_case : List[str] = UNetaDModel.from_pretrained(
"diffusers/consistency-models-test" , subfolder="test_unet_class_cond" , )
return unet
def _SCREAMING_SNAKE_CASE (self : Dict , snake_case__ : Optional[Any]=False ) -> List[Any]:
'''simple docstring'''
if class_cond:
snake_case : Tuple = self.dummy_cond_unet
else:
snake_case : Optional[Any] = self.dummy_uncond_unet
# Default to CM multistep sampler
snake_case : int = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
snake_case : List[str] = {
"unet": unet,
"scheduler": scheduler,
}
return components
def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : Optional[int] , snake_case__ : Optional[Any]=0 ) -> Union[str, Any]:
'''simple docstring'''
if str(__UpperCAmelCase ).startswith("mps" ):
snake_case : str = torch.manual_seed(__UpperCAmelCase )
else:
snake_case : Any = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
snake_case : List[Any] = {
"batch_size": 1,
"num_inference_steps": None,
"timesteps": [22, 0],
"generator": generator,
"output_type": "np",
}
return inputs
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Any:
'''simple docstring'''
snake_case : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case : List[Any] = self.get_dummy_components()
snake_case : List[str] = ConsistencyModelPipeline(**__UpperCAmelCase )
snake_case : List[Any] = pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
snake_case : Dict = self.get_dummy_inputs(__UpperCAmelCase )
snake_case : List[str] = pipe(**__UpperCAmelCase ).images
assert image.shape == (1, 32, 32, 3)
snake_case : List[Any] = image[0, -3:, -3:, -1]
snake_case : Any = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> List[str]:
'''simple docstring'''
snake_case : Tuple = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case : Dict = self.get_dummy_components(class_cond=__UpperCAmelCase )
snake_case : int = ConsistencyModelPipeline(**__UpperCAmelCase )
snake_case : List[str] = pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
snake_case : List[Any] = self.get_dummy_inputs(__UpperCAmelCase )
snake_case : Optional[int] = 0
snake_case : int = pipe(**__UpperCAmelCase ).images
assert image.shape == (1, 32, 32, 3)
snake_case : str = image[0, -3:, -3:, -1]
snake_case : Dict = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> str:
'''simple docstring'''
snake_case : Union[str, Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case : Optional[Any] = self.get_dummy_components()
snake_case : Optional[int] = ConsistencyModelPipeline(**__UpperCAmelCase )
snake_case : Optional[Any] = pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
snake_case : Optional[Any] = self.get_dummy_inputs(__UpperCAmelCase )
snake_case : List[Any] = 1
snake_case : int = None
snake_case : Optional[int] = pipe(**__UpperCAmelCase ).images
assert image.shape == (1, 32, 32, 3)
snake_case : Tuple = image[0, -3:, -3:, -1]
snake_case : List[Any] = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _SCREAMING_SNAKE_CASE (self : str ) -> int:
'''simple docstring'''
snake_case : str = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case : Any = self.get_dummy_components(class_cond=__UpperCAmelCase )
snake_case : int = ConsistencyModelPipeline(**__UpperCAmelCase )
snake_case : Optional[Any] = pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
snake_case : Any = self.get_dummy_inputs(__UpperCAmelCase )
snake_case : List[Any] = 1
snake_case : Dict = None
snake_case : Tuple = 0
snake_case : List[str] = pipe(**__UpperCAmelCase ).images
assert image.shape == (1, 32, 32, 3)
snake_case : Any = image[0, -3:, -3:, -1]
snake_case : Union[str, Any] = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : Dict=0 , snake_case__ : Union[str, Any]=False , snake_case__ : Dict="cpu" , snake_case__ : str=torch.floataa , snake_case__ : List[Any]=(1, 3, 64, 64) ) -> List[str]:
'''simple docstring'''
snake_case : Any = torch.manual_seed(__UpperCAmelCase )
snake_case : Optional[Any] = {
"num_inference_steps": None,
"timesteps": [22, 0],
"class_labels": 0,
"generator": generator,
"output_type": "np",
}
if get_fixed_latents:
snake_case : Dict = self.get_fixed_latents(seed=__UpperCAmelCase , device=__UpperCAmelCase , dtype=__UpperCAmelCase , shape=__UpperCAmelCase )
snake_case : List[Any] = latents
return inputs
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] , snake_case__ : List[str]=0 , snake_case__ : List[str]="cpu" , snake_case__ : Optional[int]=torch.floataa , snake_case__ : Tuple=(1, 3, 64, 64) ) -> List[Any]:
'''simple docstring'''
if type(__UpperCAmelCase ) == str:
snake_case : Optional[int] = torch.device(__UpperCAmelCase )
snake_case : List[Any] = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
snake_case : Optional[int] = randn_tensor(__UpperCAmelCase , generator=__UpperCAmelCase , device=__UpperCAmelCase , dtype=__UpperCAmelCase )
return latents
def _SCREAMING_SNAKE_CASE (self : Dict ) -> Any:
'''simple docstring'''
snake_case : Optional[int] = UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2" )
snake_case : Optional[Any] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
snake_case : Union[str, Any] = ConsistencyModelPipeline(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase )
pipe.to(torch_device=__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
snake_case : Union[str, Any] = self.get_inputs()
snake_case : int = pipe(**__UpperCAmelCase ).images
assert image.shape == (1, 64, 64, 3)
snake_case : Dict = image[0, -3:, -3:, -1]
snake_case : Any = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def _SCREAMING_SNAKE_CASE (self : Dict ) -> List[Any]:
'''simple docstring'''
snake_case : Union[str, Any] = UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2" )
snake_case : List[str] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
snake_case : Tuple = ConsistencyModelPipeline(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase )
pipe.to(torch_device=__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
snake_case : Optional[Any] = self.get_inputs()
snake_case : str = 1
snake_case : Dict = None
snake_case : Optional[Any] = pipe(**__UpperCAmelCase ).images
assert image.shape == (1, 64, 64, 3)
snake_case : Optional[int] = image[0, -3:, -3:, -1]
snake_case : Tuple = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
@require_torch_a
def _SCREAMING_SNAKE_CASE (self : Dict ) -> Dict:
'''simple docstring'''
snake_case : str = UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2" )
snake_case : Optional[int] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
snake_case : List[str] = ConsistencyModelPipeline(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase )
pipe.to(torch_device=__UpperCAmelCase , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
snake_case : Tuple = self.get_inputs(get_fixed_latents=__UpperCAmelCase , device=__UpperCAmelCase )
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=__UpperCAmelCase , enable_math=__UpperCAmelCase , enable_mem_efficient=__UpperCAmelCase ):
snake_case : Any = pipe(**__UpperCAmelCase ).images
assert image.shape == (1, 64, 64, 3)
snake_case : List[Any] = image[0, -3:, -3:, -1]
snake_case : List[str] = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@require_torch_a
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> int:
'''simple docstring'''
snake_case : Dict = UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2" )
snake_case : List[str] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
snake_case : Any = ConsistencyModelPipeline(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase )
pipe.to(torch_device=__UpperCAmelCase , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
snake_case : str = self.get_inputs(get_fixed_latents=__UpperCAmelCase , device=__UpperCAmelCase )
snake_case : Dict = 1
snake_case : List[Any] = None
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=__UpperCAmelCase , enable_math=__UpperCAmelCase , enable_mem_efficient=__UpperCAmelCase ):
snake_case : str = pipe(**__UpperCAmelCase ).images
assert image.shape == (1, 64, 64, 3)
snake_case : List[str] = image[0, -3:, -3:, -1]
snake_case : List[str] = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
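# Illustration of the reproducibility pattern these tests rely on (a sketch;
# `_fixed_latents_sketch` is not part of diffusers): a generator seeded with
# the same value yields identical latents, which is what makes the hard-coded
# expected slices above meaningful.
def _fixed_latents_sketch(seed: int, shape=(1, 3, 64, 64), device="cpu"):
    generator = torch.Generator(device=device).manual_seed(seed)
    return randn_tensor(shape, generator=generator, device=torch.device(device))
# _fixed_latents_sketch(0) equals _fixed_latents_sketch(0) elementwise.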
| 353 |
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
def __init__(self : Dict , snake_case__ : Any , snake_case__ : Tuple=99 , snake_case__ : Tuple=13 , snake_case__ : int=16 , snake_case__ : Tuple=7 , snake_case__ : Union[str, Any]=True , snake_case__ : int=True , snake_case__ : List[Any]=True , snake_case__ : Optional[Any]=False , snake_case__ : Optional[int]=True , snake_case__ : Any=2 , snake_case__ : List[Any]=32 , snake_case__ : List[str]=4 , snake_case__ : List[str]=4 , snake_case__ : int=30 , snake_case__ : int=0 , snake_case__ : Tuple=1 , snake_case__ : Optional[Any]=2 , snake_case__ : int=None , ) -> List[Any]:
'''simple docstring'''
snake_case : Optional[Any] = parent
snake_case : Any = batch_size
snake_case : Any = decoder_seq_length
# For common tests
snake_case : Any = self.decoder_seq_length
snake_case : Optional[int] = is_training
snake_case : List[str] = use_attention_mask
snake_case : Tuple = use_labels
snake_case : int = vocab_size
snake_case : Any = d_model
snake_case : Dict = d_model
snake_case : List[str] = decoder_layers
snake_case : Union[str, Any] = decoder_layers
snake_case : int = decoder_ffn_dim
snake_case : List[Any] = decoder_attention_heads
snake_case : Dict = decoder_attention_heads
snake_case : Optional[int] = eos_token_id
snake_case : Dict = bos_token_id
snake_case : List[str] = pad_token_id
snake_case : int = decoder_start_token_id
snake_case : List[Any] = use_cache
snake_case : List[str] = max_position_embeddings
snake_case : Dict = None
snake_case : Union[str, Any] = decoder_seq_length
snake_case : Union[str, Any] = 2
snake_case : Union[str, Any] = 1
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Optional[Any]:
'''simple docstring'''
snake_case : Dict = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
snake_case : List[str] = None
if self.use_attention_mask:
snake_case : Optional[int] = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
snake_case : Union[str, Any] = None
if self.use_labels:
snake_case : List[str] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
snake_case : Union[str, Any] = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : str , snake_case__ : Union[str, Any] , ) -> str:
'''simple docstring'''
snake_case : Optional[int] = True
snake_case : List[Any] = TrOCRDecoder(config=snake_case__ ).to(snake_case__ ).eval()
snake_case : Dict = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
snake_case : List[str] = model(snake_case__ , use_cache=snake_case__ )
snake_case : Any = model(snake_case__ )
snake_case : Any = model(snake_case__ , use_cache=snake_case__ )
self.parent.assertTrue(len(snake_case__ ) == len(snake_case__ ) )
self.parent.assertTrue(len(snake_case__ ) == len(snake_case__ ) + 1 )
snake_case : List[Any] = outputs["past_key_values"]
        # create hypothetical next token and extend to next_input_ids
snake_case : Optional[Any] = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
# append to next input_ids and
snake_case : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case : str = model(snake_case__ )["last_hidden_state"]
snake_case : str = model(snake_case__ , past_key_values=snake_case__ )["last_hidden_state"]
# select random slice
snake_case : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case : str = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
snake_case : Optional[Any] = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(snake_case__ , snake_case__ , atol=1e-3 )
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Tuple:
'''simple docstring'''
snake_case : List[Any] = self.prepare_config_and_inputs()
snake_case , snake_case , snake_case , snake_case : Dict = config_and_inputs
snake_case : List[Any] = {"input_ids": input_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( ModelTesterMixin ,GenerationTesterMixin ,PipelineTesterMixin ,unittest.TestCase ):
A__ : int = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
A__ : Union[str, Any] = (TrOCRForCausalLM,) if is_torch_available() else ()
A__ : int = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
A__ : int = True
A__ : Optional[Any] = False
def _SCREAMING_SNAKE_CASE (self : Any ) -> Optional[Any]:
'''simple docstring'''
snake_case : Optional[Any] = TrOCRStandaloneDecoderModelTester(self , is_training=snake_case__ )
snake_case : int = ConfigTester(self , config_class=snake_case__ )
def _SCREAMING_SNAKE_CASE (self : int ) -> Union[str, Any]:
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> List[Any]:
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Optional[Any]:
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE (self : Dict ) -> List[str]:
'''simple docstring'''
snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Dict ) -> Any:
'''simple docstring'''
return
@unittest.skip("The model doesn't support left padding" ) # and it's not used enough to be worth fixing :)
def _SCREAMING_SNAKE_CASE (self : Any ) -> Any:
'''simple docstring'''
pass
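# Standalone sketch of the cache-equivalence property exercised above
# (illustrative; `_check_kv_cache_equivalence` is not part of the test suite;
# it uses a tiny randomly initialised decoder, so no pretrained weights are
# needed): the last-position hidden state of a full forward pass must match
# the output of an incremental pass that reuses past_key_values.
def _check_kv_cache_equivalence():
    config = TrOCRConfig(
        vocab_size=99, d_model=32, decoder_layers=2, decoder_attention_heads=4, decoder_ffn_dim=32
    )
    model = TrOCRDecoder(config=config).eval()
    input_ids = ids_tensor([2, 7], config.vocab_size - 1) + 1
    with torch.no_grad():
        full = model(input_ids).last_hidden_state
        past = model(input_ids[:, :-1], use_cache=True).past_key_values
        step = model(input_ids[:, -1:], past_key_values=past).last_hidden_state
    assert torch.allclose(full[:, -1], step[:, 0], atol=1e-3)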
| 10 | 0 |
import qiskit
def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts:
    sim = qiskit.Aer.get_backend("aer_simulator")
    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)
    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, sim, shots=1000)
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)
if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(f"Half Adder Output Qubit Counts: {counts}")
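    # For reference, the two measured classical bits are (carry, sum) =
    # (AND, XOR) of the inputs, so with 1000 noise-free shots the expected
    # counts are:
    #     half_adder(0, 0) -> {"00": 1000}
    #     half_adder(0, 1) -> {"01": 1000}
    #     half_adder(1, 0) -> {"01": 1000}
    #     half_adder(1, 1) -> {"10": 1000}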
| 354 |
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
__lowerCamelCase = ["""text""", """image""", """audio"""]
def UpperCamelCase ( __lowerCamelCase : List[str] ):
snake_case : str = []
for input_type in input_types:
if input_type == "text":
inputs.append("Text input" )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png" ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3000 ) )
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
inputs.append(create_inputs(__lowerCamelCase ) )
else:
raise ValueError(f"""Invalid type requested: {input_type}""" )
return inputs
def UpperCamelCase ( __lowerCamelCase : List ):
snake_case : List[str] = []
for output in outputs:
if isinstance(__lowerCamelCase , (str, AgentText) ):
output_types.append("text" )
elif isinstance(__lowerCamelCase , (Image.Image, AgentImage) ):
output_types.append("image" )
elif isinstance(__lowerCamelCase , (torch.Tensor, AgentAudio) ):
output_types.append("audio" )
else:
raise ValueError(f"""Invalid output: {output}""" )
return output_types
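# Example of the two helpers above (illustrative):
#
#     create_inputs(["text"])                    # -> ["Text input"]
#     output_types(["hello", torch.ones(3000)])  # -> ["text", "audio"]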
@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)
        if not isinstance(outputs, list):
            outputs = [outputs]
        self.assertEqual(len(outputs), len(self.tool.outputs))
        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_types_inputs(self):
        inputs = create_inputs(self.tool.inputs)
        _inputs = []
        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*inputs)
        if not isinstance(outputs, list):
            outputs = [outputs]
        self.assertEqual(len(outputs), len(self.tool.outputs))
| 10 | 0 |
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
__lowerCamelCase = """▁"""
__lowerCamelCase = {"""vocab_file""": """vocab.txt""", """sentencepiece_model_ckpt""": """sentencepiece.bpe.model"""}
RESOURCE_FILES_NAMES = {
"""sentencepiece_model_file""": """sentencepiece.bpe.model""",
"""vocab_file""": """vocab.txt""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""ernie-m-base""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt""",
"""ernie-m-large""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt""",
},
"""sentencepiece_model_file""": {
"""ernie-m-base""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model""",
"""ernie-m-large""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""ernie-m-base""": 5_14,
"""ernie-m-large""": 5_14,
}
PRETRAINED_INIT_CONFIGURATION = {
"""ernie-m-base""": {"""do_lower_case""": False},
"""ernie-m-large""": {"""do_lower_case""": False},
}
class UpperCAmelCase ( PreTrainedTokenizer ):
A__ : List[str] = ["input_ids"]
A__ : Tuple = VOCAB_FILES_NAMES
A__ : str = PRETRAINED_INIT_CONFIGURATION
A__ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : List[str] = PRETRAINED_VOCAB_FILES_MAP
A__ : str = RESOURCE_FILES_NAMES
def __init__(self : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : Optional[int]=None , snake_case__ : List[str]=False , snake_case__ : str="utf8" , snake_case__ : str="[UNK]" , snake_case__ : str="[SEP]" , snake_case__ : int="[PAD]" , snake_case__ : Optional[Any]="[CLS]" , snake_case__ : Optional[Any]="[MASK]" , snake_case__ : Optional[Dict[str, Any]] = None , **snake_case__ : str , ) -> List[Any]:
'''simple docstring'''
snake_case : Any = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , pad_token=snake_case__ , cls_token=snake_case__ , mask_token=snake_case__ , vocab_file=snake_case__ , encoding=snake_case__ , sp_model_kwargs=self.sp_model_kwargs , **snake_case__ , )
snake_case : List[Any] = do_lower_case
snake_case : Union[str, Any] = sentencepiece_model_ckpt
snake_case : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(snake_case__ )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
snake_case : Tuple = self.load_vocab(filepath=snake_case__ )
else:
snake_case : int = {self.sp_model.id_to_piece(snake_case__ ): id for id in range(self.sp_model.get_piece_size() )}
snake_case : Dict = {v: k for k, v in self.vocab.items()}
    def get_offset_mapping(self, text):
        '''simple docstring'''
        if text is None:
            return None

        split_tokens = self.tokenize(text)
        normalized_text, char_mapping = "", []

        for i, ch in enumerate(text):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch)
            else:
                ch = unicodedata.normalize("NFKC", ch)
            if self.is_whitespace(ch):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch))

        text, token_mapping, offset = normalized_text, [], 0
        if self.do_lower_case:
            text = text.lower()
        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token) + offset
            end = start + len(token)

            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
            offset = end
        return token_mapping
@property
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
return len(self.vocab )
def _SCREAMING_SNAKE_CASE (self : Dict ) -> Tuple:
'''simple docstring'''
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__(self : List[str] ) -> Optional[int]:
'''simple docstring'''
snake_case : Tuple = self.__dict__.copy()
snake_case : Optional[int] = None
return state
def __setstate__(self : Optional[Any] , snake_case__ : int ) -> Dict:
'''simple docstring'''
snake_case : Tuple = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
snake_case : int = {}
snake_case : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : str ) -> List[str]:
'''simple docstring'''
return "".join((self.SP_CHAR_MAPPING.get(snake_case__ , snake_case__ ) for c in text) )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] , snake_case__ : Tuple , snake_case__ : int=False , snake_case__ : Union[str, Any]=64 , snake_case__ : Dict=0.1 ) -> List[Any]:
'''simple docstring'''
if self.sp_model_kwargs.get("enable_sampling" ) is True:
snake_case : Optional[Any] = True
if self.sp_model_kwargs.get("alpha" ) is not None:
snake_case : Dict = self.sp_model_kwargs.get("alpha" )
if self.sp_model_kwargs.get("nbest_size" ) is not None:
snake_case : int = self.sp_model_kwargs.get("nbest_size" )
if not enable_sampling:
snake_case : Optional[int] = self.sp_model.EncodeAsPieces(snake_case__ )
else:
snake_case : List[Any] = self.sp_model.SampleEncodeAsPieces(snake_case__ , snake_case__ , snake_case__ )
snake_case : Optional[int] = []
for pi, piece in enumerate(snake_case__ ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(snake_case__ ) and pi != 0:
new_pieces.append(snake_case__ )
continue
else:
continue
snake_case : str = 0
for i, chunk in enumerate(snake_case__ ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(snake_case__ ) or self.is_punct(snake_case__ ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(snake_case__ )
snake_case : List[str] = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
snake_case : List[str] = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
snake_case : Union[str, Any] = i
if len(snake_case__ ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
    def convert_tokens_to_string(self, tokens):
        '''simple docstring'''
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def convert_ids_to_string(self, ids):
        '''simple docstring'''
        tokens = self.convert_ids_to_tokens(ids)
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
def _SCREAMING_SNAKE_CASE (self : List[str] , snake_case__ : int ) -> List[Any]:
'''simple docstring'''
return self.vocab.get(snake_case__ , self.vocab.get(self.unk_token ) )
def _SCREAMING_SNAKE_CASE (self : List[Any] , snake_case__ : Tuple ) -> Optional[int]:
'''simple docstring'''
return self.reverse_vocab.get(snake_case__ , self.unk_token )
def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : Tuple , snake_case__ : Union[str, Any]=None ) -> List[str]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case : Optional[int] = [self.cls_token_id]
snake_case : Tuple = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def _SCREAMING_SNAKE_CASE (self : Any , snake_case__ : Optional[Any] , snake_case__ : Any=None ) -> List[str]:
'''simple docstring'''
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def _SCREAMING_SNAKE_CASE (self : Optional[Any] , snake_case__ : Any , snake_case__ : Dict=None , snake_case__ : Optional[int]=False ) -> Tuple:
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(snake_case__ )) + [1, 1] + ([0] * len(snake_case__ )) + [1]
return [1] + ([0] * len(snake_case__ )) + [1]
def _SCREAMING_SNAKE_CASE (self : List[Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ) -> Any:
'''simple docstring'''
if token_ids_a is None:
# [CLS] X [SEP]
return (len(snake_case__ ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(snake_case__ ) + 1) + [1] * (len(snake_case__ ) + 3)
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] , snake_case__ : str ) -> str:
'''simple docstring'''
if "\u4e00" <= char <= "\u9fff":
return True
return False
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] , snake_case__ : Any ) -> List[Any]:
'''simple docstring'''
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def _SCREAMING_SNAKE_CASE (self : Any , snake_case__ : List[str] ) -> Tuple:
'''simple docstring'''
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : List[str] ) -> int:
'''simple docstring'''
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(snake_case__ ) == 1:
snake_case : Union[str, Any] = unicodedata.category(snake_case__ )
if cat == "Zs":
return True
return False
def _SCREAMING_SNAKE_CASE (self : Dict , snake_case__ : Dict ) -> Union[str, Any]:
'''simple docstring'''
snake_case : List[Any] = {}
with io.open(snake_case__ , "r" , encoding="utf-8" ) as f:
for index, line in enumerate(snake_case__ ):
snake_case : Union[str, Any] = line.rstrip("\n" )
snake_case : str = int(snake_case__ )
return token_to_idx
def _SCREAMING_SNAKE_CASE (self : Any , snake_case__ : str , snake_case__ : Optional[str] = None ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Dict = 0
if os.path.isdir(snake_case__ ):
snake_case : List[Any] = os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
else:
snake_case : List[str] = (filename_prefix + """-""" if filename_prefix else """""") + save_directory
with open(snake_case__ , "w" , encoding="utf-8" ) as writer:
            for token, token_index in sorted(self.vocab.items() , key=lambda kv: kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
" Please check that the vocabulary is not corrupted!" )
snake_case : List[str] = token_index
writer.write(token + "\n" )
index += 1
snake_case : Optional[Any] = os.path.join(snake_case__ , "sentencepiece.bpe.model" )
with open(snake_case__ , "wb" ) as fi:
snake_case : int = self.sp_model.serialized_model_proto()
fi.write(snake_case__ )
return (vocab_file,)
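# Minimal illustration of the special-token layout produced by the
# build-inputs method above (illustrative; 0 and 1 are stand-in ids for [CLS]
# and [SEP], not the real vocabulary):
#
#     single sequence:   [CLS] A [SEP]               -> [0] + A + [1]
#     pair of sequences: [CLS] A [SEP] [SEP] B [SEP] -> [0] + A + [1, 1] + B + [1]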
| 355 |
def hamming_distance(string1: str, string2: str) -> int:
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")

    count = 0
    for char_1, char_2 in zip(string1, string2):
        if char_1 != char_2:
            count += 1

    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
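    # Example (illustrative): "karolin" and "kathrin" differ in exactly three
    # positions, so their Hamming distance is 3.
    assert hamming_distance("karolin", "kathrin") == 3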
| 10 | 0 |
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaProcessor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("""1.6"""):
__lowerCamelCase = True
from torch.cuda.amp import autocast
logger = logging.getLogger(__name__)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    attention_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for the attention probabilities."}
    )
    activation_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for activations inside the fully connected layer."}
    )
    hidden_dropout: Optional[float] = field(
        default=0.1,
        metadata={
            "help": "The dropout probability for all fully connected layers in the embeddings, encoder, and pooler."
        },
    )
    feat_proj_dropout: Optional[float] = field(
        default=0.1,
        metadata={"help": "The dropout probability for all 1D convolutional layers in feature extractor."},
    )
    mask_time_prob: Optional[float] = field(
        default=0.05,
        metadata={
            "help": (
                "Propability of each feature vector along the time axis to be chosen as the start of the vector"
                "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"
                "vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
            )
        },
    )
    layerdrop: Optional[float] = field(default=0.0, metadata={"help": "The LayerDrop probability."})
@dataclass
class DataTrainingArguments:
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train+validation",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_val_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of validation examples to this "
                "value if set."
            )
        },
    )
    chars_to_ignore: List[str] = list_field(
        default=[",", "?", ".", "!", "-", ";", ":", '""', "%", "'", '"', "�"],
        metadata={"help": "A list of characters to remove from the transcripts."},
    )
@dataclass
class DataCollatorCTCWithPadding:
    processor: WavaVecaProcessor
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        input_features = [{"input_values": feature["input_values"]} for feature in features]
        label_features = [{"input_ids": feature["labels"]} for feature in features]
        batch = self.processor.pad(
            input_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        labels_batch = self.processor.pad(
            labels=label_features,
            padding=self.padding,
            max_length=self.max_length_labels,
            pad_to_multiple_of=self.pad_to_multiple_of_labels,
            return_tensors="pt",
        )
        # replace padding with -100 to ignore loss correctly
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)
        batch["labels"] = labels
        return batch
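# Minimal illustration of the label masking done in the collator above (a
# sketch, assuming only torch):
#
#     labels = torch.tensor([[5, 9, 2, 0, 0]])
#     attention_mask = torch.tensor([[1, 1, 1, 0, 0]])
#     labels.masked_fill(attention_mask.ne(1), -100)
#     # -> tensor([[   5,    9,    2, -100, -100]])
#
# Positions where the attention mask is 0 become -100, which the CTC loss
# ignores.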
class CTCTrainer(Trainer):
    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["labels"] >= 0).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        return loss.detach()
def UpperCamelCase ( ):
snake_case : Optional[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
snake_case , snake_case , snake_case : int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
snake_case , snake_case , snake_case : List[str] = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
snake_case : List[Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
snake_case : Union[str, Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s" , A__ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
snake_case : Optional[int] = datasets.load_dataset(
"common_voice" , data_args.dataset_config_name , split=data_args.train_split_name )
snake_case : Optional[Any] = datasets.load_dataset("common_voice" , data_args.dataset_config_name , split="test" )
# Create and save tokenizer
snake_case : int = f"""[{''.join(data_args.chars_to_ignore )}]"""
def remove_special_characters(__lowerCamelCase : Any ):
snake_case : str = re.sub(A__ , "" , batch["sentence"] ).lower() + " "
return batch
snake_case : Union[str, Any] = train_dataset.map(A__ , remove_columns=["sentence"] )
snake_case : int = eval_dataset.map(A__ , remove_columns=["sentence"] )
def extract_all_chars(__lowerCamelCase : Dict ):
snake_case : int = " ".join(batch["text"] )
snake_case : Tuple = list(set(A__ ) )
return {"vocab": [vocab], "all_text": [all_text]}
snake_case : List[str] = train_dataset.map(
A__ , batched=A__ , batch_size=-1 , keep_in_memory=A__ , remove_columns=train_dataset.column_names , )
snake_case : Optional[Any] = train_dataset.map(
A__ , batched=A__ , batch_size=-1 , keep_in_memory=A__ , remove_columns=eval_dataset.column_names , )
snake_case : Optional[Any] = list(set(vocab_train["vocab"][0] ) | set(vocab_test["vocab"][0] ) )
snake_case : Tuple = {v: k for k, v in enumerate(A__ )}
snake_case : Any = vocab_dict[" "]
del vocab_dict[" "]
snake_case : Dict = len(A__ )
snake_case : Union[str, Any] = len(A__ )
with open("vocab.json" , "w" ) as vocab_file:
json.dump(A__ , A__ )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
snake_case : Tuple = WavaVecaCTCTokenizer(
"vocab.json" , unk_token="[UNK]" , pad_token="[PAD]" , word_delimiter_token="|" , )
snake_case : Union[str, Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0.0 , do_normalize=A__ , return_attention_mask=A__ )
snake_case : List[str] = WavaVecaProcessor(feature_extractor=A__ , tokenizer=A__ )
snake_case : List[str] = WavaVecaForCTC.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction="mean" , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , )
if data_args.max_train_samples is not None:
snake_case : List[str] = min(len(A__ ) , data_args.max_train_samples )
snake_case : Union[str, Any] = train_dataset.select(range(A__ ) )
if data_args.max_val_samples is not None:
snake_case : List[str] = eval_dataset.select(range(data_args.max_val_samples ) )
snake_case : List[str] = torchaudio.transforms.Resample(48000 , 16000 )
# Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
def speech_file_to_array_fn(__lowerCamelCase : Any ):
snake_case , snake_case : str = torchaudio.load(batch["path"] )
snake_case : str = resampler(A__ ).squeeze().numpy()
snake_case : int = 16000
snake_case : Optional[int] = batch["text"]
return batch
snake_case : Optional[Any] = train_dataset.map(
A__ , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
snake_case : Optional[int] = eval_dataset.map(
A__ , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
def prepare_dataset(__lowerCamelCase : Dict ):
# check that all files have the correct sampling rate
assert (
len(set(batch["sampling_rate"] ) ) == 1
), f"""Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."""
snake_case : Dict = processor(
audio=batch["speech"] , text=batch["target_text"] , sampling_rate=batch["sampling_rate"][0] )
batch.update(A__ )
return batch
snake_case : Dict = train_dataset.map(
A__ , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=A__ , num_proc=data_args.preprocessing_num_workers , )
snake_case : Tuple = eval_dataset.map(
A__ , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=A__ , num_proc=data_args.preprocessing_num_workers , )
# Metric
snake_case : int = datasets.load_metric("wer" )
def compute_metrics(__lowerCamelCase : Optional[int] ):
snake_case : Tuple = pred.predictions
snake_case : List[str] = np.argmax(A__ , axis=-1 )
snake_case : Dict = processor.tokenizer.pad_token_id
snake_case : str = processor.batch_decode(A__ )
# we do not want to group tokens when computing the metrics
snake_case : int = processor.batch_decode(pred.label_ids , group_tokens=A__ )
snake_case : Tuple = wer_metric.compute(predictions=A__ , references=A__ )
return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
snake_case : Any = DataCollatorCTCWithPadding(processor=A__ , padding=A__ )
# Initialize our Trainer
snake_case : int = CTCTrainer(
model=A__ , data_collator=A__ , args=A__ , compute_metrics=A__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
snake_case : int = last_checkpoint
elif os.path.isdir(model_args.model_name_or_path ):
snake_case : Tuple = model_args.model_name_or_path
else:
snake_case : Tuple = None
# Save the feature_extractor and the tokenizer
if is_main_process(training_args.local_rank ):
processor.save_pretrained(training_args.output_dir )
snake_case : List[str] = trainer.train(resume_from_checkpoint=A__ )
trainer.save_model()
snake_case : int = train_result.metrics
snake_case : Union[str, Any] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(A__ )
)
snake_case : List[Any] = min(A__ , len(A__ ) )
trainer.log_metrics("train" , A__ )
trainer.save_metrics("train" , A__ )
trainer.save_state()
# Evaluation
snake_case : Any = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
snake_case : str = trainer.evaluate()
snake_case : Any = data_args.max_val_samples if data_args.max_val_samples is not None else len(A__ )
snake_case : Optional[Any] = min(A__ , len(A__ ) )
trainer.log_metrics("eval" , A__ )
trainer.save_metrics("eval" , A__ )
return results
if __name__ == "__main__":
main()
| 356 |
def remove_digit(num: int) -> int:
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    else:
        num_str = str(abs(num))
        num_transpositions = [list(num_str) for char in range(len(num_str))]
        for index in range(len(num_str)):
            num_transpositions[index].pop(index)
        return max(
            int("".join(list(transposition))) for transposition in num_transpositions
        )


if __name__ == "__main__":
    __import__("doctest").testmod()
| 10 | 0 |
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
__lowerCamelCase = """0.12""" # assumed parallelism: 8
@require_flax
@is_staging_test
class UpperCAmelCase ( unittest.TestCase ):
@classmethod
def _SCREAMING_SNAKE_CASE (cls : str ) -> Tuple:
'''simple docstring'''
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
@classmethod
def _SCREAMING_SNAKE_CASE (cls : int ) -> Optional[int]:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id="test-model-flax" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-model-flax-org" )
except HTTPError:
pass
    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37)
        model = FlaxBertModel(config)
        model.push_to_hub("test-model-flax", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"""{USER}/test-model-flax""")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"""{key} not identical""")

        # Reset repo
        delete_repo(token=self._token, repo_id="test-model-flax")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id="test-model-flax", push_to_hub=True, use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"""{USER}/test-model-flax""")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"""{key} not identical""")
    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37)
        model = FlaxBertModel(config)
        model.push_to_hub("valid_org/test-model-flax-org", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"""{key} not identical""")

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-model-flax-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir, repo_id="valid_org/test-model-flax-org", push_to_hub=True, use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"""{key} not identical""")
def check_models_equal(modela, modelb):
    models_are_equal = True
    flat_params_a = flatten_dict(modela.params)
    flat_params_b = flatten_dict(modelb.params)
    for key in flat_params_a.keys():
        if np.sum(np.abs(flat_params_a[key] - flat_params_b[key])) > 1e-4:
            models_are_equal = False
    return models_are_equal
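# Hedged usage sketch (variable names are illustrative): the helper above walks
# the flattened parameter trees of two Flax models and reports False as soon as
# any leaf differs by more than 1e-4 in summed absolute difference.
#   models_match = check_models_equal(model, reloaded_model)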
@require_flax
class UpperCAmelCase ( unittest.TestCase ):
    def test_model_from_pretrained_subfolder(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))
    def test_model_from_pretrained_subfolder_sharded(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB")

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))
    def test_model_from_pretrained_hub_subfolder(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-subfolder"
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)
    def test_model_from_pretrained_hub_subfolder_sharded(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder"
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F'Current {symbol:<4} stock price is {stock_price(symbol):>8}')
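# Note: the CSS class queried above is tied to one revision of the Yahoo Finance
# markup; generated class names like this change without notice, so the scraper
# should be expected to break whenever the page layout is updated.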
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
__lowerCamelCase = logging.get_logger(__name__)
def get_resize_output_image_size(input_image: np.ndarray, output_size: Union[int, Iterable[int]], keep_aspect_ratio: bool, multiple: int):
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
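# Worked example (illustrative numbers): for a 480x640 input, a 384x384 target,
# keep_aspect_ratio=True and multiple=32, the scale closer to 1 wins
# (384/480 = 0.8 rather than 384/640 = 0.6), giving an output of (384, 512);
# both sides are already multiples of 32, so the rounding constraint is a no-op.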
class UpperCAmelCase ( BaseImageProcessor ):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, keep_aspect_ratio: bool = False, ensure_multiple_of: int = 1, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], keep_aspect_ratio: bool = False, ensure_multiple_of: int = 1, resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""")
        output_size = get_resize_output_image_size(
            image, output_size=(size["height"], size["width"]), keep_aspect_ratio=keep_aspect_ratio, multiple=ensure_multiple_of)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: int = None, keep_aspect_ratio: bool = None, ensure_multiple_of: int = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits")

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
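# Hedged usage sketch (variable names are illustrative, not from the original):
#   inputs = image_processor(images=image, return_tensors="pt")
#   outputs = model(**inputs)
#   seg = image_processor.post_process_semantic_segmentation(
#       outputs, target_sizes=[image.size[::-1]]
#   )[0]  # one (height, width) map of class indices per input image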
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        mname = f"""facebook/wmt19-{pair}"""
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    """configuration_speech_to_text""": ["""SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Speech2TextConfig"""],
    """processing_speech_to_text""": ["""Speech2TextProcessor"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ["""Speech2TextTokenizer"""]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ["""Speech2TextFeatureExtractor"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_speech_to_text"""] = [
        """TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFSpeech2TextForConditionalGeneration""",
        """TFSpeech2TextModel""",
        """TFSpeech2TextPreTrainedModel""",
    ]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_speech_to_text"""] = [
        """SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """Speech2TextForConditionalGeneration""",
        """Speech2TextModel""",
        """Speech2TextPreTrainedModel""",
    ]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import Speech2TextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
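# Minimal sketch of what the lazy structure above buys the caller (assuming the
# optional torch dependency is installed): nothing heavy is imported until the
# attribute is first accessed.
#   from transformers.models.speech_to_text import Speech2TextForConditionalGeneration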
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
"""return_dict""": False,
"""output_hidden_states""": True,
"""output_attentions""": True,
"""torchscript""": True,
"""torch_dtype""": """float16""",
"""use_bfloat16""": True,
"""tf_legacy_loss""": True,
"""pruned_heads""": {"""a""": 1},
"""tie_word_embeddings""": False,
"""is_decoder""": True,
"""cross_attention_hidden_size""": 1_28,
"""add_cross_attention""": True,
"""tie_encoder_decoder""": True,
"""max_length""": 50,
"""min_length""": 3,
"""do_sample""": True,
"""early_stopping""": True,
"""num_beams""": 3,
"""num_beam_groups""": 3,
"""diversity_penalty""": 0.5,
"""temperature""": 2.0,
"""top_k""": 10,
"""top_p""": 0.7,
"""typical_p""": 0.2,
"""repetition_penalty""": 0.8,
"""length_penalty""": 0.8,
"""no_repeat_ngram_size""": 5,
"""encoder_no_repeat_ngram_size""": 5,
"""bad_words_ids""": [1, 2, 3],
"""num_return_sequences""": 3,
"""chunk_size_feed_forward""": 5,
"""output_scores""": True,
"""return_dict_in_generate""": True,
"""forced_bos_token_id""": 2,
"""forced_eos_token_id""": 3,
"""remove_invalid_values""": True,
"""architectures""": ["""BertModel"""],
"""finetuning_task""": """translation""",
"""id2label""": {0: """label"""},
"""label2id""": {"""label""": """0"""},
"""tokenizer_class""": """BertTokenizerFast""",
"""prefix""": """prefix""",
"""bos_token_id""": 6,
"""pad_token_id""": 7,
"""eos_token_id""": 8,
"""sep_token_id""": 9,
"""decoder_start_token_id""": 10,
"""exponential_decay_length_penalty""": (5, 1.01),
"""suppress_tokens""": [0, 1],
"""begin_suppress_tokens""": 2,
"""task_specific_params""": {"""translation""": """some_params"""},
"""problem_type""": """regression""",
}
@is_staging_test
class UpperCAmelCase ( unittest.TestCase ):
@classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
@classmethod
    def tearDownClass(cls):
try:
delete_repo(token=cls._token , repo_id="test-config" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-config-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-config" )
except HTTPError:
pass
    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37)
        config.push_to_hub("test-config", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"""{USER}/test-config""")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id="test-config", push_to_hub=True, use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"""{USER}/test-config""")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37)
        config.push_to_hub("valid_org/test-config-org", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-config-org", push_to_hub=True, use_auth_token=self._token)

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)

        config.push_to_hub("test-dynamic-config", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig"})

        new_config = AutoConfig.from_pretrained(f"""{USER}/test-dynamic-config""", trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, "CustomConfig")
        self.assertEqual(new_config.attribute, 42)
class UpperCAmelCase ( unittest.TestCase ):
    def test_config_from_string(self):
        c = GPTaConfig()

        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + "foo"  # str
        c.update_from_string(
            f"""n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}""")
        self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd")
        self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop")
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights")
        self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type")

    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"])
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                "The following keys are set with the default values in"
                " `test_configuration_common.config_common_kwargs` pick another value for them:"
                f""" {', '.join(keys_with_defaults)}.""")
    def test_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder")

        config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder", subfolder="bert")

        self.assertIsNotNone(config)

    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = BertConfig.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json")

    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained("bert-base-cased")
        configuration.configuration_files = ["config.4.0.0.json"]

        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, "config.4.0.0.json"), "w"))

            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)

            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ["config.42.0.0.json"]
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json"))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)

    def test_repo_versioning_before(self):
        repo = "hf-internal-testing/test-two-configs"

        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = "v4.0.0"
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True)
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})

        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = "v3.0.0"
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class UpperCAmelCase ( ProcessorMixin ):
    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)

        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]

        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask=None) -> List[np.ndarray]:
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)

        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[padding_mask[i][None, :] != self.feature_extractor.padding_value]
            audio_values[i] = sliced_audio.reshape(channels, -1)

        return audio_values
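# Hedged usage sketch (the waveform and argument values are illustrative): the
# processor above encodes text and audio in one call, and any `padding_mask`
# produced by the feature extractor is carried along for decoding later.
#   processor = UpperCAmelCase(feature_extractor, tokenizer)
#   inputs = processor(text=["80s pop with a beat"], audio=waveform, sampling_rate=32000)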
import os
import string
import sys
__lowerCamelCase = 1 << 8
__lowerCamelCase = {
"""tab""": ord("""\t"""),
"""newline""": ord("""\r"""),
"""esc""": 27,
"""up""": 65 + ARROW_KEY_FLAG,
"""down""": 66 + ARROW_KEY_FLAG,
"""right""": 67 + ARROW_KEY_FLAG,
"""left""": 68 + ARROW_KEY_FLAG,
"""mod_int""": 91,
"""undefined""": sys.maxsize,
"""interrupt""": 3,
"""insert""": 50,
"""delete""": 51,
"""pg_up""": 53,
"""pg_down""": 54,
}
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
B"""\xe0H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
B"""\x00H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
B"""\xe0P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
B"""\x00P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
B"""\xe0M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
B"""\x00M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
B"""\xe0K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
B"""\x00K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
}
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    "Gets raw characters from inputs"
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def get_character():
    "Gets a character from the keyboard and returns the key code"
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
"""simple docstring"""
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
    from cookiecutter.main import cookiecutter

    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False
__lowerCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
def UpperCamelCase ( args ):
    return AddNewModelCommand(args.testing, args.testing_file, path=args.path)


class AddNewModelCommand ( BaseTransformersCLICommand ):
@staticmethod
    def register_subcommand(parser):
        add_new_model_parser = parser.add_parser("add-new-model")
        add_new_model_parser.add_argument("--testing", action="store_true", help="If in testing mode.")
        add_new_model_parser.add_argument("--testing_file", type=str, help="Configuration file on which to run.")
        add_new_model_parser.add_argument(
            "--path", type=str, help="Path to cookiecutter. Should only be used for testing purposes.")
        add_new_model_parser.set_defaults(func=UpperCamelCase)
    def __init__(self, testing: bool, testing_file: str, path=None, *args):
        self._testing = testing
        self._testing_file = testing_file
        self._path = path
def _SCREAMING_SNAKE_CASE (self : Any ) -> Dict:
'''simple docstring'''
warnings.warn(
"The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
"It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
"checks, you should use `transformers-cli add-new-model-like` instead." )
if not _has_cookiecutter:
raise ImportError(
"Model creation dependencies are required to use the `add_new_model` command. Install them by running "
"the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n" )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
        if len(directories) > 0:
raise ValueError(
"Several directories starting with `cookiecutter-template-` in current working directory. "
"Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
"change your working directory." )
        path_to_transformer_root = (
            Path(__file__).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / "templates" / "adding_a_new_model"

        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter))
        else:
            with open(self._testing_file, "r") as configuration_file:
                testing_configuration = json.load(configuration_file)
            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path),
                no_input=True,
                extra_context=testing_configuration,
            )
        directory = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]

        # Retrieve configuration
        with open(directory + "/configuration.json", "r") as configuration_file:
            configuration = json.load(configuration_file)

        lowercase_model_name = configuration["lowercase_modelname"]
        generate_tensorflow_pytorch_and_flax = configuration["generate_tensorflow_pytorch_and_flax"]
        os.remove(f"""{directory}/configuration.json""")

        output_pytorch = "PyTorch" in generate_tensorflow_pytorch_and_flax
        output_tensorflow = "TensorFlow" in generate_tensorflow_pytorch_and_flax
        output_flax = "Flax" in generate_tensorflow_pytorch_and_flax

        model_dir = f"""{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"""
        os.makedirs(model_dir, exist_ok=True)
        os.makedirs(f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}""", exist_ok=True)
# Tests require submodules as they have parent imports
with open(f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py""" , "w" ):
pass
shutil.move(
f"""{directory}/__init__.py""" , f"""{model_dir}/__init__.py""" , )
shutil.move(
f"""{directory}/configuration_{lowercase_model_name}.py""" , f"""{model_dir}/configuration_{lowercase_model_name}.py""" , )
        def remove_copy_lines(path):
            with open(path, "r") as f:
                lines = f.readlines()
            with open(path, "w") as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line)
if output_pytorch:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/test_modeling_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py""" , )
else:
os.remove(f"""{directory}/modeling_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_{lowercase_model_name}.py""" )
if output_tensorflow:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_tf_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_tf_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py""" , )
else:
os.remove(f"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" )
if output_flax:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_flax_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_flax_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py""" , )
else:
os.remove(f"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/{lowercase_model_name}.md""" , f"""{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md""" , )
shutil.move(
f"""{directory}/tokenization_{lowercase_model_name}.py""" , f"""{model_dir}/tokenization_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/tokenization_fast_{lowercase_model_name}.py""" , f"""{model_dir}/tokenization_{lowercase_model_name}_fast.py""" , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
        def replace(original_file: str, line_to_copy_below: str, lines_to_copy: List[str]):
            # Create temp file
            fh, abs_path = mkstemp()
            line_found = False
            with fdopen(fh, "w") as new_file:
                with open(original_file) as old_file:
                    for line in old_file:
                        new_file.write(line)
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy)

            if not line_found:
                raise ValueError(f"""Line {line_to_copy_below} was not found in file.""")

            # Copy the file permissions from the old file to the new file
            copymode(original_file, abs_path)
            # Remove original file
            remove(original_file)
            # Move new file
            move(abs_path, original_file)

        def skip_units(line):
            return (
                ("generating PyTorch" in line and not output_pytorch)
                or ("generating TensorFlow" in line and not output_tensorflow)
                or ("generating Flax" in line and not output_flax)
            )
        def replace_in_files(path_to_datafile):
            with open(path_to_datafile) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split('"')[1]
                        skip_file = skip_units(line)
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split('"')[1]
                        skip_snippet = skip_units(line)
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in, line_to_copy_below, lines_to_copy)

                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line)

            remove(path_to_datafile)

        replace_in_files(f"""{directory}/to_replace_{lowercase_model_name}.py""")
        os.rmdir(directory)
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
__lowerCamelCase = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ["""DPTFeatureExtractor"""]
__lowerCamelCase = ["""DPTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_dpt"""] = [
        """DPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """DPTForDepthEstimation""",
        """DPTForSemanticSegmentation""",
        """DPTModel""",
        """DPTPreTrainedModel""",
    ]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("""1.11""")


def onnx_export(model, model_args: tuple, output_path: Path, ordered_input_names, output_names, dynamic_axes, opset, use_external_data_format=False):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, use_external_data_format=use_external_data_format, enable_onnx_checker=True, opset_version=opset, )
    else:
        export(
            model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, opset_version=opset, )
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fpaa: bool = False):
    dtype = torch.float16 if fpaa else torch.float32
    if fpaa and torch.cuda.is_available():
        device = "cuda"
    elif fpaa and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    pipeline = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=dtype).to(device)
    output_path = Path(output_path)
# TEXT ENCODER
snake_case : Any = pipeline.text_encoder.config.max_position_embeddings
snake_case : Dict = pipeline.text_encoder.config.hidden_size
snake_case : Optional[int] = pipeline.tokenizer(
"A sample prompt" , padding="max_length" , max_length=pipeline.tokenizer.model_max_length , truncation=UpperCamelCase__ , return_tensors="pt" , )
onnx_export(
pipeline.text_encoder , model_args=(text_input.input_ids.to(device=UpperCamelCase__ , dtype=torch.intaa )) , output_path=output_path / "text_encoder" / "model.onnx" , ordered_input_names=["input_ids"] , output_names=["last_hidden_state", "pooler_output"] , dynamic_axes={
"input_ids": {0: "batch", 1: "sequence"},
} , opset=UpperCamelCase__ , )
del pipeline.text_encoder
# UNET
snake_case : Dict = pipeline.unet.config.in_channels
snake_case : Tuple = pipeline.unet.config.sample_size
snake_case : List[Any] = output_path / "unet" / "model.onnx"
onnx_export(
pipeline.unet , model_args=(
torch.randn(2 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
torch.randn(2 ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
torch.randn(2 , UpperCamelCase__ , UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
False,
) , output_path=UpperCamelCase__ , ordered_input_names=["sample", "timestep", "encoder_hidden_states", "return_dict"] , output_names=["out_sample"] , dynamic_axes={
"sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
"timestep": {0: "batch"},
"encoder_hidden_states": {0: "batch", 1: "sequence"},
} , opset=UpperCamelCase__ , use_external_data_format=UpperCamelCase__ , )
snake_case : List[Any] = str(unet_path.absolute().as_posix() )
snake_case : List[str] = os.path.dirname(UpperCamelCase__ )
snake_case : List[str] = onnx.load(UpperCamelCase__ )
# clean up existing tensor files
shutil.rmtree(UpperCamelCase__ )
os.mkdir(UpperCamelCase__ )
# collate external tensor files into one
onnx.save_model(
UpperCamelCase__ , UpperCamelCase__ , save_as_external_data=UpperCamelCase__ , all_tensors_to_one_file=UpperCamelCase__ , location="weights.pb" , convert_attribute=UpperCamelCase__ , )
del pipeline.unet
    # VAE ENCODER
    vae_encoder = pipeline.vae
    vae_in_channels = vae_encoder.config.in_channels
    vae_sample_size = vae_encoder.config.sample_size
    # need to get the raw tensor output (sample) from the encoder
    vae_encoder.forward = lambda sample, return_dict: vae_encoder.encode(sample, return_dict)[0].sample()
onnx_export(
UpperCamelCase__ , model_args=(
torch.randn(1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
False,
) , output_path=output_path / "vae_encoder" / "model.onnx" , ordered_input_names=["sample", "return_dict"] , output_names=["latent_sample"] , dynamic_axes={
"sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
} , opset=UpperCamelCase__ , )
    # VAE DECODER
    vae_decoder = pipeline.vae
    vae_latent_channels = vae_decoder.config.latent_channels
    vae_out_channels = vae_decoder.config.out_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
onnx_export(
UpperCamelCase__ , model_args=(
torch.randn(1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
False,
) , output_path=output_path / "vae_decoder" / "model.onnx" , ordered_input_names=["latent_sample", "return_dict"] , output_names=["sample"] , dynamic_axes={
"latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
} , opset=UpperCamelCase__ , )
del pipeline.vae
# SAFETY CHECKER
if pipeline.safety_checker is not None:
snake_case : Optional[Any] = pipeline.safety_checker
snake_case : Any = safety_checker.config.vision_config.num_channels
snake_case : Any = safety_checker.config.vision_config.image_size
snake_case : Dict = safety_checker.forward_onnx
onnx_export(
pipeline.safety_checker , model_args=(
torch.randn(
1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
torch.randn(1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
) , output_path=output_path / "safety_checker" / "model.onnx" , ordered_input_names=["clip_input", "images"] , output_names=["out_images", "has_nsfw_concepts"] , dynamic_axes={
"clip_input": {0: "batch", 1: "channels", 2: "height", 3: "width"},
"images": {0: "batch", 1: "height", 2: "width", 3: "channels"},
} , opset=UpperCamelCase__ , )
del pipeline.safety_checker
snake_case : Tuple = OnnxRuntimeModel.from_pretrained(output_path / "safety_checker" )
snake_case : Dict = pipeline.feature_extractor
else:
snake_case : Tuple = None
snake_case : List[str] = None
snake_case : int = OnnxStableDiffusionPipeline(
vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_encoder" ) , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_decoder" ) , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / "text_encoder" ) , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / "unet" ) , scheduler=pipeline.scheduler , safety_checker=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , requires_safety_checker=safety_checker is not None , )
onnx_pipeline.save_pretrained(UpperCamelCase__ )
print("ONNX pipeline saved to" , UpperCamelCase__ )
del pipeline
del onnx_pipeline
snake_case : List[str] = OnnxStableDiffusionPipeline.from_pretrained(UpperCamelCase__ , provider="CPUExecutionProvider" )
print("ONNX pipeline is loadable" )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
parser.add_argument(
"""--model_path""",
type=str,
required=True,
help="""Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).""",
)
parser.add_argument("""--output_path""", type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--opset""",
default=14,
type=int,
help="""The version of the ONNX operator set to use.""",
)
parser.add_argument("""--fp16""", action="""store_true""", default=False, help="""Export the models in `float16` mode""")
__lowerCamelCase = parser.parse_args()
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
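# Example invocation (the script name and paths are illustrative; `--fp16`
# additionally requires a CUDA-capable GPU, as enforced above):
#   python convert_stable_diffusion_to_onnx.py \
#       --model_path runwayml/stable-diffusion-v1-5 --output_path ./sd_onnx --opset 14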
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
__lowerCamelCase = logging.get_logger(__name__)
class PerceiverFeatureExtractor ( PerceiverImageProcessor ):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
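# Hedged usage sketch: instantiating the shim behaves exactly like
# PerceiverImageProcessor, except that a FutureWarning is emitted first.
#   feature_extractor = PerceiverFeatureExtractor()  # warns, then acts as the image processor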
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1_002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_002)
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> Dict:
'''simple docstring'''
snake_case : Any = XLMRobertaTokenizer(_SCREAMING_SNAKE_CASE , keep_accents=_SCREAMING_SNAKE_CASE )
snake_case : Dict = tokenizer.tokenize("This is a test" )
self.assertListEqual(_SCREAMING_SNAKE_CASE , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
snake_case : Any = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
_SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
snake_case : Tuple = tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE )
self.assertListEqual(
_SCREAMING_SNAKE_CASE , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
snake_case : List[Any] = tokenizer.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE )
self.assertListEqual(
_SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
def _SCREAMING_SNAKE_CASE (self : Dict ) -> List[str]:
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
snake_case : Tuple = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
snake_case : Optional[int] = self.rust_tokenizer_class.from_pretrained(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
snake_case : Optional[int] = self.tokenizer_class.from_pretrained(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
snake_case : Dict = tempfile.mkdtemp()
snake_case : Any = tokenizer_r.save_pretrained(_SCREAMING_SNAKE_CASE )
snake_case : Union[str, Any] = tokenizer_p.save_pretrained(_SCREAMING_SNAKE_CASE )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
snake_case : Optional[Any] = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f )
self.assertSequenceEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Checks everything loads correctly in the same way
snake_case : Optional[int] = tokenizer_r.from_pretrained(_SCREAMING_SNAKE_CASE )
snake_case : Optional[Any] = tokenizer_p.from_pretrained(_SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(_SCREAMING_SNAKE_CASE )
# Save tokenizer rust, legacy_format=True
snake_case : Union[str, Any] = tempfile.mkdtemp()
snake_case : Tuple = tokenizer_r.save_pretrained(_SCREAMING_SNAKE_CASE , legacy_format=_SCREAMING_SNAKE_CASE )
snake_case : Optional[Any] = tokenizer_p.save_pretrained(_SCREAMING_SNAKE_CASE )
# Checks it save with the same files
self.assertSequenceEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Checks everything loads correctly in the same way
snake_case : int = tokenizer_r.from_pretrained(_SCREAMING_SNAKE_CASE )
snake_case : List[Any] = tokenizer_p.from_pretrained(_SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
shutil.rmtree(_SCREAMING_SNAKE_CASE )
# Save tokenizer rust, legacy_format=False
snake_case : Union[str, Any] = tempfile.mkdtemp()
snake_case : Dict = tokenizer_r.save_pretrained(_SCREAMING_SNAKE_CASE , legacy_format=_SCREAMING_SNAKE_CASE )
snake_case : Union[str, Any] = tokenizer_p.save_pretrained(_SCREAMING_SNAKE_CASE )
# Checks it saved the tokenizer.json file
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
snake_case : List[Any] = tokenizer_r.from_pretrained(_SCREAMING_SNAKE_CASE )
snake_case : Dict = tokenizer_p.from_pretrained(_SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
shutil.rmtree(_SCREAMING_SNAKE_CASE )
@cached_property
def _SCREAMING_SNAKE_CASE (self : str ) -> Optional[Any]:
'''simple docstring'''
return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base" )
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(_SCREAMING_SNAKE_CASE , f.name )
snake_case : Any = XLMRobertaTokenizer(f.name , keep_accents=_SCREAMING_SNAKE_CASE )
snake_case : Any = pickle.dumps(_SCREAMING_SNAKE_CASE )
pickle.loads(_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> Optional[int]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
snake_case : Optional[int] = self.get_tokenizer()
snake_case : List[Any] = self.get_rust_tokenizer()
snake_case : str = "I was born in 92000, and this is falsé."
snake_case : List[Any] = tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
snake_case : Tuple = rust_tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case : Any = tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
snake_case : str = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case : str = self.get_rust_tokenizer()
snake_case : Union[str, Any] = tokenizer.encode(_SCREAMING_SNAKE_CASE )
snake_case : int = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def _SCREAMING_SNAKE_CASE (self : int ) -> Dict:
'''simple docstring'''
snake_case : Union[str, Any] = "Hello World!"
snake_case : int = [0, 3_53_78, 66_61, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(_SCREAMING_SNAKE_CASE , self.big_tokenizer.encode(_SCREAMING_SNAKE_CASE ) )
@slow
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Optional[Any]:
'''simple docstring'''
snake_case : Optional[int] = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
snake_case : Tuple = [
0,
32_93,
83,
10,
45_52,
49_89,
79_86,
6_78,
10,
59_15,
1_11,
17_94_59,
12_48_50,
4,
60_44,
2_37,
12,
6,
5,
6,
4,
67_80,
7_05,
15,
13_88,
44,
3_78,
1_01_14,
7_11,
1_52,
20,
6,
5,
2_23_76,
6_42,
12_21,
1_51_90,
3_41_53,
4_50,
56_08,
9_59,
11_19,
5_77_02,
1_36,
1_86,
47,
10_98,
2_93_67,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
60_44,
2_37,
62_84,
5_09_01,
5_28,
31,
90,
34,
9_27,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(_SCREAMING_SNAKE_CASE , self.big_tokenizer.encode(_SCREAMING_SNAKE_CASE ) )
@slow
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Dict:
'''simple docstring'''
snake_case : List[Any] = {"input_ids": [[0, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [0, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_SCREAMING_SNAKE_CASE , model_name="xlm-roberta-base" , revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3" , )
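# Background for the id arithmetic asserted in the tests above (informational, not a test):
# the HF XLM-R tokenizer shifts every raw SentencePiece id by fairseq_offset (= 1) so that ids
# 0..3 can mirror fairseq's "<s>", "<pad>", "</s>", "<unk>" specials; e.g. the raw id 2_85 for
# "▁This" becomes 2_85 + 1 in the encoded output.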
| 363 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__lowerCamelCase = {
"""configuration_pix2struct""": [
"""PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Pix2StructConfig""",
"""Pix2StructTextConfig""",
"""Pix2StructVisionConfig""",
],
"""processing_pix2struct""": ["""Pix2StructProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ["""Pix2StructImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
"""PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Pix2StructPreTrainedModel""",
"""Pix2StructForConditionalGeneration""",
"""Pix2StructVisionModel""",
"""Pix2StructTextModel""",
]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_pix2struct import Pix2StructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
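# Sketch of what the _LazyModule indirection buys (illustrative usage, not part of this file):
#   import transformers.models.pix2struct as pix2struct   # cheap: heavy deps not imported yet
#   config = pix2struct.Pix2StructConfig()                # first attribute access triggers the real import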
| 10 | 0 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
        ),
    },
    "spm_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
        )
    },
}
MAX_MODEL_INPUT_SIZES = {
    "facebook/s2t-small-librispeech-asr": 10_24,
}
MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]
LANGUAGES = {"mustc": MUSTC_LANGS}
class UpperCAmelCase ( PreTrainedTokenizer ):
A__ : int = VOCAB_FILES_NAMES
A__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
A__ : Any = MAX_MODEL_INPUT_SIZES
A__ : Any = ["input_ids", "attention_mask"]
A__ : Optional[int] = []
def __init__(self : Any , snake_case__ : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : Dict="<s>" , snake_case__ : Dict="</s>" , snake_case__ : Optional[int]="<pad>" , snake_case__ : Tuple="<unk>" , snake_case__ : int=False , snake_case__ : List[str]=False , snake_case__ : Dict=None , snake_case__ : List[Any]=None , snake_case__ : Optional[Dict[str, Any]] = None , **snake_case__ : Optional[Any] , ) -> Optional[int]:
'''simple docstring'''
snake_case : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , do_upper_case=UpperCAmelCase_ , do_lower_case=UpperCAmelCase_ , tgt_lang=UpperCAmelCase_ , lang_codes=UpperCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase_ , )
snake_case : List[Any] = do_upper_case
snake_case : List[Any] = do_lower_case
snake_case : Union[str, Any] = load_json(UpperCAmelCase_ )
snake_case : Tuple = {v: k for k, v in self.encoder.items()}
snake_case : int = spm_file
snake_case : List[str] = load_spm(UpperCAmelCase_ , self.sp_model_kwargs )
if lang_codes is not None:
snake_case : Optional[int] = lang_codes
snake_case : Any = LANGUAGES[lang_codes]
snake_case : Union[str, Any] = [f"""<lang:{lang}>""" for lang in self.langs]
snake_case : Union[str, Any] = {lang: self.sp_model.PieceToId(f"""<lang:{lang}>""" ) for lang in self.langs}
snake_case : Any = self.lang_tokens
snake_case : Tuple = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
snake_case : Any = {}
@property
def _SCREAMING_SNAKE_CASE (self : Dict ) -> str:
'''simple docstring'''
return len(self.encoder )
@property
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> List[str]:
'''simple docstring'''
return self._tgt_lang
@tgt_lang.setter
def _SCREAMING_SNAKE_CASE (self : List[str] , snake_case__ : int ) -> Any:
'''simple docstring'''
snake_case : Union[str, Any] = new_tgt_lang
self.set_tgt_lang_special_tokens(UpperCAmelCase_ )
def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : str ) -> int:
'''simple docstring'''
snake_case : List[Any] = self.lang_code_to_id[tgt_lang]
snake_case : int = [lang_code_id]
def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : str ) -> Any:
'''simple docstring'''
return self.sp_model.encode(UpperCAmelCase_ , out_type=UpperCAmelCase_ )
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] , snake_case__ : List[Any] ) -> Optional[int]:
'''simple docstring'''
return self.encoder.get(UpperCAmelCase_ , self.encoder[self.unk_token] )
def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : int ) -> Optional[Any]:
'''simple docstring'''
return self.decoder.get(UpperCAmelCase_ , self.unk_token )
def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : List[str] ) -> List[Any]:
'''simple docstring'''
snake_case : Optional[int] = []
snake_case : Union[str, Any] = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
snake_case : Any = self.sp_model.decode(UpperCAmelCase_ )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
snake_case : int = []
else:
current_sub_tokens.append(UpperCAmelCase_ )
snake_case : Dict = self.sp_model.decode(UpperCAmelCase_ )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def _SCREAMING_SNAKE_CASE (self : Optional[Any] , snake_case__ : Any , snake_case__ : Dict=None ) -> List[Any]:
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def _SCREAMING_SNAKE_CASE (self : Optional[Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ) -> str:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_ )
snake_case : int = [1] * len(self.prefix_tokens )
snake_case : Optional[int] = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(UpperCAmelCase_ )) + suffix_ones
return prefix_ones + ([0] * len(UpperCAmelCase_ )) + ([0] * len(UpperCAmelCase_ )) + suffix_ones
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> str:
'''simple docstring'''
snake_case : Optional[int] = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__(self : Dict ) -> Dict:
'''simple docstring'''
snake_case : Any = self.__dict__.copy()
snake_case : List[str] = None
return state
def __setstate__(self : int , snake_case__ : Dict ) -> Union[str, Any]:
'''simple docstring'''
snake_case : List[str] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
snake_case : Union[str, Any] = {}
snake_case : Tuple = load_spm(self.spm_file , self.sp_model_kwargs )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] , snake_case__ : str , snake_case__ : Optional[str] = None ) -> Optional[int]:
'''simple docstring'''
snake_case : Any = Path(UpperCAmelCase_ )
assert save_dir.is_dir(), f"""{save_directory} should be a directory"""
snake_case : List[str] = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
)
snake_case : int = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
)
save_json(self.encoder , UpperCAmelCase_ )
if os.path.abspath(self.spm_file ) != os.path.abspath(UpperCAmelCase_ ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , UpperCAmelCase_ )
elif not os.path.isfile(self.spm_file ):
with open(UpperCAmelCase_ , "wb" ) as fi:
snake_case : int = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase_ )
return (str(UpperCAmelCase_ ), str(UpperCAmelCase_ ))
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm
def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)
def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
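# Quick round-trip check for the JSON helpers above (a sketch; the temp path is illustrative):
#   save_json({"<pad>": 0, "<s>": 1}, "/tmp/vocab_demo.json")
#   assert load_json("/tmp/vocab_demo.json") == {"<pad>": 0, "<s>": 1}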
| 364 |
def UpperCamelCase ( input_string : str ):
    max_length = 0
    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ""
    output_string = ""
    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]
    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741
    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]
    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1
        length[j] = 2 * k - 1
        # does this string end after the previously explored end (that is r)?
        # if yes then update r to the last index of this
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1
        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j
    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i
    return output_string
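# Worked example for the routine above (traced by hand): for "abbbaba" the interleaved string is
# "a|b|b|b|a|b|a"; the radius array peaks at the middle "b" of "abbba", so the function returns
# "abbba", the longest palindromic substring.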
if __name__ == "__main__":
import doctest
doctest.testmod()
| 10 | 0 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
__lowerCamelCase = """sshleifer/bart-tiny-random"""
__lowerCamelCase = """patrickvonplaten/t5-tiny-random"""
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
@cached_property
def _SCREAMING_SNAKE_CASE (self : Any ) -> List[str]:
'''simple docstring'''
return AutoConfig.from_pretrained(lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE (self : Dict ) -> List[str]:
'''simple docstring'''
snake_case , *snake_case : str = create_student_by_copying_alternating_layers(lowerCAmelCase__ , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Dict:
'''simple docstring'''
snake_case , *snake_case : Optional[int] = create_student_by_copying_alternating_layers(lowerCAmelCase__ , tempfile.mkdtemp() , e=1 , d=lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> List[str]:
'''simple docstring'''
snake_case , *snake_case : Union[str, Any] = create_student_by_copying_alternating_layers(lowerCAmelCase__ , tempfile.mkdtemp() , e=1 , d=lowerCAmelCase__ )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def _SCREAMING_SNAKE_CASE (self : Dict ) -> Tuple:
'''simple docstring'''
snake_case , *snake_case : str = create_student_by_copying_alternating_layers(lowerCAmelCase__ , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
with self.assertRaises(lowerCAmelCase__ ):
create_student_by_copying_alternating_layers(lowerCAmelCase__ , tempfile.mkdtemp() , e=lowerCAmelCase__ , d=lowerCAmelCase__ )
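# For reference, the helper under test returns the student model plus the copied layer ids
# (a sketch; the return triple is an assumption from the upstream make_student script, and the
# teacher checkpoint is the constant defined at the top of this file):
#   student, e_layers, d_layers = create_student_by_copying_alternating_layers(
#       "sshleifer/bart-tiny-random", tempfile.mkdtemp(), e=1, d=1)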
| 365 |
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
__lowerCamelCase = Mapping[str, np.ndarray]
__lowerCamelCase = Mapping[str, Any] # Is a nested dict.
__lowerCamelCase = 0.01
@dataclasses.dataclass(frozen=True )
class UpperCAmelCase :
A__ : np.ndarray # [num_res, num_atom_type, 3]
# Amino-acid type for each residue represented as an integer between 0 and
# 20, where 20 is 'X'.
A__ : np.ndarray # [num_res]
# Binary float mask to indicate presence of a particular atom. 1.0 if an atom
# is present and 0.0 if not. This should be used for loss masking.
A__ : np.ndarray # [num_res, num_atom_type]
# Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
A__ : np.ndarray # [num_res]
# B-factors, or temperature factors, of each residue (in sq. angstroms units),
# representing the displacement of the residue from its ground truth mean
# value.
A__ : np.ndarray # [num_res, num_atom_type]
# Chain indices for multi-chain predictions
A__ : Optional[np.ndarray] = None
# Optional remark about the protein. Included as a comment in output PDB
# files
A__ : Optional[str] = None
# Templates used to generate this protein (prediction-only)
A__ : Optional[Sequence[str]] = None
# Chain corresponding to each parent
A__ : Optional[Sequence[int]] = None
def UpperCamelCase ( __lowerCamelCase : str ):
snake_case : Dict = r"(\[[A-Z]+\]\n)"
snake_case : List[str] = [tag.strip() for tag in re.split(__lowerCamelCase , __lowerCamelCase ) if len(__lowerCamelCase ) > 0]
snake_case : Iterator[Tuple[str, List[str]]] = zip(tags[0::2] , [l.split("\n" ) for l in tags[1::2]] )
snake_case : List[str] = ["N", "CA", "C"]
snake_case : str = None
snake_case : str = None
snake_case : Tuple = None
for g in groups:
if "[PRIMARY]" == g[0]:
snake_case : Tuple = g[1][0].strip()
for i in range(len(__lowerCamelCase ) ):
if seq[i] not in residue_constants.restypes:
snake_case : Optional[Any] = "X" # FIXME: strings are immutable
snake_case : Optional[int] = np.array(
[residue_constants.restype_order.get(__lowerCamelCase , residue_constants.restype_num ) for res_symbol in seq] )
elif "[TERTIARY]" == g[0]:
snake_case : List[List[float]] = []
for axis in range(3 ):
tertiary.append(list(map(__lowerCamelCase , g[1][axis].split() ) ) )
snake_case : Union[str, Any] = np.array(__lowerCamelCase )
snake_case : str = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa )
for i, atom in enumerate(__lowerCamelCase ):
snake_case : Dict = np.transpose(tertiary_np[:, i::3] )
atom_positions *= PICO_TO_ANGSTROM
elif "[MASK]" == g[0]:
snake_case : int = np.array(list(map({"-": 0, "+": 1}.get , g[1][0].strip() ) ) )
snake_case : List[str] = np.zeros(
(
len(__lowerCamelCase ),
residue_constants.atom_type_num,
) ).astype(np.floataa )
for i, atom in enumerate(__lowerCamelCase ):
snake_case : Any = 1
atom_mask *= mask[..., None]
assert aatype is not None
return Protein(
atom_positions=__lowerCamelCase , atom_mask=__lowerCamelCase , aatype=__lowerCamelCase , residue_index=np.arange(len(__lowerCamelCase ) ) , b_factors=__lowerCamelCase , )
def UpperCamelCase ( __lowerCamelCase : Protein , __lowerCamelCase : int = 0 ):
snake_case : List[str] = []
snake_case : str = prot.remark
if remark is not None:
pdb_headers.append(f"""REMARK {remark}""" )
snake_case : Union[str, Any] = prot.parents
snake_case : Dict = prot.parents_chain_index
if parents is not None and parents_chain_index is not None:
snake_case : Tuple = [p for i, p in zip(__lowerCamelCase , __lowerCamelCase ) if i == chain_id]
if parents is None or len(__lowerCamelCase ) == 0:
snake_case : int = ["N/A"]
pdb_headers.append(f"""PARENT {' '.join(__lowerCamelCase )}""" )
return pdb_headers
def UpperCamelCase ( __lowerCamelCase : Protein , __lowerCamelCase : str ):
snake_case : List[str] = []
snake_case : Any = pdb_str.split("\n" )
snake_case : int = prot.remark
if remark is not None:
out_pdb_lines.append(f"""REMARK {remark}""" )
snake_case : List[List[str]]
if prot.parents is not None and len(prot.parents ) > 0:
snake_case : Optional[Any] = []
if prot.parents_chain_index is not None:
snake_case : Dict[str, List[str]] = {}
for p, i in zip(prot.parents , prot.parents_chain_index ):
parent_dict.setdefault(str(__lowerCamelCase ) , [] )
parent_dict[str(__lowerCamelCase )].append(__lowerCamelCase )
snake_case : List[str] = max([int(__lowerCamelCase ) for chain_idx in parent_dict] )
for i in range(max_idx + 1 ):
snake_case : Optional[Any] = parent_dict.get(str(__lowerCamelCase ) , ["N/A"] )
parents_per_chain.append(__lowerCamelCase )
else:
parents_per_chain.append(list(prot.parents ) )
else:
snake_case : Optional[Any] = [["N/A"]]
def make_parent_line(__lowerCamelCase : Sequence[str] ) -> str:
return f"""PARENT {' '.join(__lowerCamelCase )}"""
out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
snake_case : List[Any] = 0
for i, l in enumerate(__lowerCamelCase ):
if "PARENT" not in l and "REMARK" not in l:
out_pdb_lines.append(__lowerCamelCase )
if "TER" in l and "END" not in lines[i + 1]:
chain_counter += 1
if not chain_counter >= len(__lowerCamelCase ):
snake_case : int = parents_per_chain[chain_counter]
else:
snake_case : Any = ["N/A"]
out_pdb_lines.append(make_parent_line(__lowerCamelCase ) )
return "\n".join(__lowerCamelCase )
def UpperCamelCase ( __lowerCamelCase : Protein ):
snake_case : str = residue_constants.restypes + ["X"]
def res_atoa(__lowerCamelCase : int ) -> str:
return residue_constants.restype_atoa.get(restypes[r] , "UNK" )
snake_case : List[Any] = residue_constants.atom_types
snake_case : List[str] = []
snake_case : Any = prot.atom_mask
snake_case : Any = prot.aatype
snake_case : Dict = prot.atom_positions
snake_case : List[str] = prot.residue_index.astype(np.intaa )
snake_case : Dict = prot.b_factors
snake_case : Tuple = prot.chain_index
if np.any(aatype > residue_constants.restype_num ):
raise ValueError("Invalid aatypes." )
snake_case : Any = get_pdb_headers(__lowerCamelCase )
if len(__lowerCamelCase ) > 0:
pdb_lines.extend(__lowerCamelCase )
snake_case : Dict = aatype.shape[0]
snake_case : Tuple = 1
snake_case : Any = 0
snake_case : Union[str, Any] = string.ascii_uppercase
snake_case : int = None
# Add all atom sites.
for i in range(__lowerCamelCase ):
snake_case : List[Any] = res_atoa(aatype[i] )
for atom_name, pos, mask, b_factor in zip(__lowerCamelCase , atom_positions[i] , atom_mask[i] , b_factors[i] ):
if mask < 0.5:
continue
snake_case : Any = "ATOM"
snake_case : str = atom_name if len(__lowerCamelCase ) == 4 else f""" {atom_name}"""
snake_case : Optional[Any] = ""
snake_case : Dict = ""
snake_case : Optional[Any] = 1.00
snake_case : str = atom_name[0] # Protein supports only C, N, O, S, this works.
snake_case : Dict = ""
snake_case : Any = "A"
if chain_index is not None:
snake_case : str = chain_tags[chain_index[i]]
# PDB is a columnar format, every space matters here!
snake_case : List[str] = (
f"""{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"""
f"""{res_name_a:>3} {chain_tag:>1}"""
f"""{residue_index[i]:>4}{insertion_code:>1} """
f"""{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"""
f"""{occupancy:>6.2f}{b_factor:>6.2f} """
f"""{element:>2}{charge:>2}"""
)
pdb_lines.append(__lowerCamelCase )
atom_index += 1
snake_case : Optional[int] = i == n - 1
if chain_index is not None:
if i != n - 1 and chain_index[i + 1] != prev_chain_index:
snake_case : Any = True
snake_case : Tuple = chain_index[i + 1]
if should_terminate:
# Close the chain.
snake_case : Optional[Any] = "TER"
snake_case : Optional[int] = (
f"""{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}"""
)
pdb_lines.append(__lowerCamelCase )
atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
pdb_lines.extend(get_pdb_headers(__lowerCamelCase , __lowerCamelCase ) )
pdb_lines.append("END" )
pdb_lines.append("" )
return "\n".join(__lowerCamelCase )
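# Note on the columnar f-string above: PDB ATOM records are fixed-width (record name in columns
# 1-6, atom serial in 7-11, x/y/z as three 8.3f fields, and so on), which is why every literal
# space in the format string is load-bearing.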
def UpperCamelCase ( __lowerCamelCase : Protein ):
return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def UpperCamelCase ( __lowerCamelCase : FeatureDict , __lowerCamelCase : ModelOutput , __lowerCamelCase : Optional[np.ndarray] = None , __lowerCamelCase : Optional[np.ndarray] = None , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : Optional[Sequence[str]] = None , __lowerCamelCase : Optional[Sequence[int]] = None , ):
return Protein(
aatype=features["aatype"] , atom_positions=result["final_atom_positions"] , atom_mask=result["final_atom_mask"] , residue_index=features["residue_index"] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"] ) , chain_index=__lowerCamelCase , remark=__lowerCamelCase , parents=__lowerCamelCase , parents_chain_index=__lowerCamelCase , )
| 10 | 0 |
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class UpperCAmelCase ( OnnxPipelineTesterMixin , unittest.TestCase ):
# FIXME: add fast tests
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
@property
def _SCREAMING_SNAKE_CASE (self : Any ) -> Any:
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _SCREAMING_SNAKE_CASE (self : Any ) -> str:
'''simple docstring'''
snake_case : List[str] = ort.SessionOptions()
snake_case : List[Any] = False
return options
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
snake_case : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
snake_case : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
snake_case : str = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , safety_checker=__lowerCamelCase , feature_extractor=__lowerCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
snake_case : List[str] = "A red cat sitting on a park bench"
snake_case : List[str] = np.random.RandomState(0 )
snake_case : int = pipe(
prompt=__lowerCamelCase , image=__lowerCamelCase , mask_image=__lowerCamelCase , guidance_scale=7.5 , num_inference_steps=10 , generator=__lowerCamelCase , output_type="np" , )
snake_case : Dict = output.images
snake_case : List[Any] = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
snake_case : Any = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Dict:
'''simple docstring'''
snake_case : Tuple = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
snake_case : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
snake_case : List[Any] = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-inpainting" , subfolder="scheduler" , revision="onnx" )
snake_case : Dict = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , scheduler=__lowerCamelCase , safety_checker=__lowerCamelCase , feature_extractor=__lowerCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
snake_case : Dict = "A red cat sitting on a park bench"
snake_case : List[str] = np.random.RandomState(0 )
snake_case : List[Any] = pipe(
prompt=__lowerCamelCase , image=__lowerCamelCase , mask_image=__lowerCamelCase , guidance_scale=7.5 , num_inference_steps=20 , generator=__lowerCamelCase , output_type="np" , )
snake_case : Any = output.images
snake_case : List[str] = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
snake_case : List[Any] = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
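# Minimal standalone use of the pipeline exercised above (hedged sketch; "init_image" and
# "mask_image" stand for images loaded as in the tests, CPU provider chosen for portability):
#   pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
#       "runwayml/stable-diffusion-inpainting", revision="onnx", provider="CPUExecutionProvider")
#   out = pipe(prompt="A red cat sitting on a park bench", image=init_image, mask_image=mask_image)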
| 366 |
from __future__ import annotations
graph = {
"""A""": ["""B""", """C""", """E"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F""", """G"""],
"""D""": ["""B"""],
"""E""": ["""A""", """B""", """D"""],
"""F""": ["""C"""],
"""G""": ["""C"""],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex
    def breath_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)
    def shortest_path(self, target_vertex: str) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = (
                f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            )
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"
if __name__ == "__main__":
    g = Graph(graph, "G")
g.breath_first_search()
print(g.shortest_path("""D"""))
print(g.shortest_path("""G"""))
print(g.shortest_path("""Foo"""))
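    # Hand-checked expectations for the run above: BFS from "G" reaches "D" through C, A and B,
    # so the first print yields "G->C->A->B->D"; the second returns the source itself, "G"; and
    # the third raises ValueError because "Foo" is absent from the parent map.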
| 10 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__lowerCamelCase = {
"""configuration_clip""": [
"""CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPConfig""",
"""CLIPOnnxConfig""",
"""CLIPTextConfig""",
"""CLIPVisionConfig""",
],
"""processing_clip""": ["""CLIPProcessor"""],
"""tokenization_clip""": ["""CLIPTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ["""CLIPTokenizerFast"""]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ["""CLIPFeatureExtractor"""]
__lowerCamelCase = ["""CLIPImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
"""CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPModel""",
"""CLIPPreTrainedModel""",
"""CLIPTextModel""",
"""CLIPTextModelWithProjection""",
"""CLIPVisionModel""",
"""CLIPVisionModelWithProjection""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
"""TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCLIPModel""",
"""TFCLIPPreTrainedModel""",
"""TFCLIPTextModel""",
"""TFCLIPVisionModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
"""FlaxCLIPModel""",
"""FlaxCLIPPreTrainedModel""",
"""FlaxCLIPTextModel""",
"""FlaxCLIPTextPreTrainedModel""",
"""FlaxCLIPVisionModel""",
"""FlaxCLIPVisionPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 367 |
from __future__ import annotations
def peak(lst: list[int]) -> int:
    m = len(lst) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]
    # if middle element is peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]
    # if increasing, recurse on right
    elif three[0] < three[2]:
        if len(lst[:m]) == 2:
            m -= 1
        return peak(lst[m:])
    # decreasing
    else:
        if len(lst[:m]) == 2:
            m += 1
        return peak(lst[:m])
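# Worked example (hand-checked): for [1, 2, 3, 4, 5, 4, 3, 2, 1], m = 4 and the middle window
# lst[3:6] == [4, 5, 4] already brackets the maximum, so peak(...) returns 5; each recursion
# halves the list, giving O(log n) comparisons on a rising-then-falling sequence.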
if __name__ == "__main__":
import doctest
doctest.testmod()
| 10 | 0 |
from __future__ import annotations
import requests
valid_terms = set(
"""approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports""".split()
)
def get_subreddit_data(
    subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None
) -> dict:
    """
    subreddit : Subreddit to query
    limit : Number of posts to fetch
    age : ["new", "top", "hot"]
    wanted_data : Get only the required data in the list
    """
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError
    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}
    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict
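# Shape of the result (sketch): get_subreddit_data("learnpython", limit=2, wanted_data=["title", "url"])
# returns {0: {"title": ..., "url": ...}, 1: {"title": ..., "url": ...}}, one dict per post, keyed by index.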
if __name__ == "__main__":
# If you get Error 429, that means you are rate limited.Try after some time
print(get_subreddit_data("""learnpython""", wanted_data=["""title""", """url""", """selftext"""]))
| 368 |
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = "."
if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
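# The file being validated holds one repo-relative path per line, kept in alphabetical order,
# e.g. (illustrative entries):
#   docs/source/en/quicktour.md
#   src/transformers/models/bert/modeling_bert.py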
| 10 | 0 |
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class UpperCAmelCase ( SchedulerCommonTest ):
A__ : Optional[int] = (DPMSolverSDEScheduler,)
A__ : List[Any] = 10
def _SCREAMING_SNAKE_CASE (self : List[str] , **snake_case__ : Dict ) -> Dict:
'''simple docstring'''
snake_case : List[str] = {
"num_train_timesteps": 11_00,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"noise_sampler_seed": 0,
}
config.update(**_a )
return config
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> str:
'''simple docstring'''
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=_a )
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Dict:
'''simple docstring'''
for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=_a , beta_end=_a )
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_a )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> List[str]:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_a )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Any:
'''simple docstring'''
snake_case : Tuple = self.scheduler_classes[0]
snake_case : str = self.get_scheduler_config()
snake_case : Tuple = scheduler_class(**_a )
scheduler.set_timesteps(self.num_inference_steps )
snake_case : List[Any] = self.dummy_model()
snake_case : str = self.dummy_sample_deter * scheduler.init_noise_sigma
snake_case : str = sample.to(_a )
for i, t in enumerate(scheduler.timesteps ):
snake_case : List[Any] = scheduler.scale_model_input(_a , _a )
snake_case : Dict = model(_a , _a )
snake_case : Optional[Any] = scheduler.step(_a , _a , _a )
snake_case : Optional[int] = output.prev_sample
snake_case : Optional[Any] = torch.sum(torch.abs(_a ) )
snake_case : Optional[Any] = torch.mean(torch.abs(_a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_67.47_82_10_44_92_18_75 ) < 1e-2
assert abs(result_mean.item() - 0.2178705964565277 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_71.59_35_21_11_81_64_06 ) < 1e-2
assert abs(result_mean.item() - 0.22342906892299652 ) < 1e-3
else:
assert abs(result_sum.item() - 1_62.52_38_34_22_85_15_62 ) < 1e-2
assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3
def _SCREAMING_SNAKE_CASE (self : int ) -> Optional[Any]:
'''simple docstring'''
snake_case : Union[str, Any] = self.scheduler_classes[0]
snake_case : Dict = self.get_scheduler_config(prediction_type="v_prediction" )
snake_case : Any = scheduler_class(**_a )
scheduler.set_timesteps(self.num_inference_steps )
snake_case : Union[str, Any] = self.dummy_model()
snake_case : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma
snake_case : Dict = sample.to(_a )
for i, t in enumerate(scheduler.timesteps ):
snake_case : Dict = scheduler.scale_model_input(_a , _a )
snake_case : str = model(_a , _a )
snake_case : int = scheduler.step(_a , _a , _a )
snake_case : List[Any] = output.prev_sample
snake_case : Optional[Any] = torch.sum(torch.abs(_a ) )
snake_case : List[Any] = torch.mean(torch.abs(_a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_24.77_14_92_00_43_94_53 ) < 1e-2
assert abs(result_mean.item() - 0.16226289014816284 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_28.1_66_33_60_59_57_03 ) < 1e-2
assert abs(result_mean.item() - 0.16688326001167297 ) < 1e-3
else:
assert abs(result_sum.item() - 1_19.8_48_75_48_82_81_25 ) < 1e-2
assert abs(result_mean.item() - 0.1560530662536621 ) < 1e-3
def _SCREAMING_SNAKE_CASE (self : str ) -> Union[str, Any]:
'''simple docstring'''
snake_case : str = self.scheduler_classes[0]
snake_case : Union[str, Any] = self.get_scheduler_config()
snake_case : Dict = scheduler_class(**_a )
scheduler.set_timesteps(self.num_inference_steps , device=_a )
snake_case : Any = self.dummy_model()
snake_case : List[str] = self.dummy_sample_deter.to(_a ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
snake_case : Optional[Any] = scheduler.scale_model_input(_a , _a )
snake_case : List[str] = model(_a , _a )
snake_case : str = scheduler.step(_a , _a , _a )
snake_case : Optional[Any] = output.prev_sample
snake_case : List[str] = torch.sum(torch.abs(_a ) )
snake_case : Dict = torch.mean(torch.abs(_a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_67.46_95_73_97_46_09_38 ) < 1e-2
assert abs(result_mean.item() - 0.21805934607982635 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_71.59_35_36_37_69_53_12 ) < 1e-2
assert abs(result_mean.item() - 0.22342908382415771 ) < 1e-3
else:
assert abs(result_sum.item() - 1_62.52_38_34_22_85_15_62 ) < 1e-2
assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Optional[int]:
'''simple docstring'''
snake_case : Optional[int] = self.scheduler_classes[0]
snake_case : Optional[Any] = self.get_scheduler_config()
snake_case : List[Any] = scheduler_class(**_a , use_karras_sigmas=_a )
scheduler.set_timesteps(self.num_inference_steps , device=_a )
snake_case : List[str] = self.dummy_model()
snake_case : List[str] = self.dummy_sample_deter.to(_a ) * scheduler.init_noise_sigma
snake_case : Optional[int] = sample.to(_a )
for t in scheduler.timesteps:
snake_case : Tuple = scheduler.scale_model_input(_a , _a )
snake_case : Any = model(_a , _a )
snake_case : int = scheduler.step(_a , _a , _a )
snake_case : int = output.prev_sample
snake_case : List[Any] = torch.sum(torch.abs(_a ) )
snake_case : List[str] = torch.mean(torch.abs(_a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_76.66_97_41_35_74_21_88 ) < 1e-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_77.63_65_35_64_45_31_25 ) < 1e-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
else:
assert abs(result_sum.item() - 1_70.3_13_52_23_38_86_72 ) < 1e-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
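# Minimal standalone use of the scheduler exercised above (sketch; values mirror
# get_scheduler_config):
#   sched = DPMSolverSDEScheduler(num_train_timesteps=1100, beta_start=0.0001, beta_end=0.02,
#                                 beta_schedule="linear", noise_sampler_seed=0)
#   sched.set_timesteps(10)  # populates the timestep/sigma schedule used by the denoising loop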
| 369 |
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge."""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)  # indent=None is an assumption from the upstream CLI
    return metrics  # these print nicely
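# CLI sketch via fire (file names illustrative); positional args map onto the parameters above:
#   python rouge_cli.py preds.txt targets.txt --save_path rouge_scores.json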
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
| 10 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
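# Example of how the "*" wildcards in MAPPING resolve (informational): a fairseq key such as
# "encoder.layers.3.self_attn.k_proj.weight" matches the "self_attn.k_proj" entry, the layer
# index "3" is spliced into the target, and the HF key becomes
# "encoder.layers.3.attention.k_proj" with weight_type "weight".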
def UpperCamelCase ( __lowerCamelCase : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[int] ):
for attribute in key.split("." ):
snake_case : str = getattr(_lowerCAmelCase , _lowerCAmelCase )
if weight_type is not None:
snake_case : Optional[int] = getattr(_lowerCAmelCase , _lowerCAmelCase ).shape
else:
snake_case : str = hf_pointer.shape
assert hf_shape == value.shape, (
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
snake_case : Optional[int] = value
elif weight_type == "weight_g":
snake_case : List[Any] = value
elif weight_type == "weight_v":
snake_case : Union[str, Any] = value
elif weight_type == "bias":
snake_case : Optional[int] = value
else:
snake_case : str = value
logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def UpperCamelCase ( __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : int ):
snake_case : Dict = []
snake_case : Optional[int] = fairseq_model.state_dict()
snake_case : Optional[int] = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
snake_case : Dict = False
if "conv_layers" in name:
load_conv_layer(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , hf_model.config.feat_extract_norm == "group" , )
snake_case : str = True
else:
for key, mapped_key in MAPPING.items():
snake_case : Tuple = """hubert.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key
if key in name or (key.split("w2v_model." )[-1] == name.split("." )[0] and not is_finetuned):
snake_case : Optional[Any] = True
if "*" in mapped_key:
snake_case : Dict = name.split(_lowerCAmelCase )[0].split("." )[-2]
snake_case : Any = mapped_key.replace("*" , _lowerCAmelCase )
if "weight_g" in name:
snake_case : List[Any] = """weight_g"""
elif "weight_v" in name:
snake_case : Optional[Any] = """weight_v"""
elif "weight" in name:
snake_case : str = """weight"""
elif "bias" in name:
snake_case : List[str] = """bias"""
else:
snake_case : str = None
set_recursively(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
continue
if not is_used:
unused_weights.append(_lowerCAmelCase )
logger.warning(f"""Unused weights: {unused_weights}""" )
def UpperCamelCase ( __lowerCamelCase : Tuple , __lowerCamelCase : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Dict ):
snake_case : Optional[int] = full_name.split("conv_layers." )[-1]
snake_case : List[str] = name.split("." )
snake_case : List[str] = int(items[0] )
snake_case : List[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
snake_case : Optional[int] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
snake_case : Dict = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
snake_case : List[str] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
snake_case : List[Any] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
        unused_weights.append(__lowerCamelCase )
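# Minimal sketch of how the conv-layer key is parsed above; the full_name used
# here is a hypothetical example of the fairseq naming scheme.
def _demo_conv_name_parsing() -> None:
    full_name = "feature_extractor.conv_layers.0.2.weight"  # hypothetical key
    name = full_name.split("conv_layers." )[-1]  # -> "0.2.weight"
    items = name.split("." )
    assert (int(items[0] ), int(items[1] )) == (0, 2)  # layer_id, type_id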
@torch.no_grad()
def UpperCamelCase ( __lowerCamelCase : List[Any] , __lowerCamelCase : str , __lowerCamelCase : Any=None , __lowerCamelCase : List[Any]=None , __lowerCamelCase : int=True ):
if config_path is not None:
        snake_case : Optional[Any] = HubertConfig.from_pretrained(__lowerCamelCase )
else:
snake_case : str = HubertConfig()
if is_finetuned:
if dict_path:
            snake_case : Optional[int] = Dictionary.load(__lowerCamelCase )
            # important: change the bos & pad token ids, since the CTC blank symbol
            # is <pad> and not <s> as in fairseq
            snake_case : Optional[int] = target_dict.pad_index
            snake_case : int = target_dict.bos_index
            snake_case : Tuple = target_dict.eos_index
            snake_case : List[Any] = len(target_dict.symbols )
            snake_case : int = os.path.join(__lowerCamelCase , "vocab.json" )
            if not os.path.isdir(__lowerCamelCase ):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(__lowerCamelCase ) )
                return
            os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
            with open(__lowerCamelCase , "w" , encoding="utf-8" ) as vocab_handle:
                json.dump(target_dict.indices , __lowerCamelCase )
            snake_case : List[Any] = WavaVecaCTCTokenizer(
                __lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=__lowerCamelCase , )
            snake_case : List[str] = config.feat_extract_norm == "layer"
            snake_case : str = WavaVecaFeatureExtractor(
                feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__lowerCamelCase , return_attention_mask=__lowerCamelCase , )
            snake_case : List[Any] = WavaVecaProcessor(feature_extractor=__lowerCamelCase , tokenizer=__lowerCamelCase )
            processor.save_pretrained(__lowerCamelCase )
        snake_case : Optional[Any] = HubertForCTC(__lowerCamelCase )
else:
        snake_case : List[str] = HubertModel(__lowerCamelCase )
if is_finetuned:
        snake_case , snake_case , snake_case : Optional[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
    else:
        snake_case , snake_case , snake_case : Tuple = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
    snake_case : List[Any] = model[0].eval()
    recursively_load_weights(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
    hf_wavavec.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
__lowerCamelCase = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
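# Example invocation (script name and all paths are hypothetical placeholders):
#   python convert_hubert_checkpoint.py \
#       --checkpoint_path /path/to/hubert.pt \
#       --pytorch_dump_folder_path /path/to/output_dir \
#       --dict_path /path/to/dict.ltr.txt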
| 370 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""encoder.layer_norm_for_extract""": """layer_norm_for_extract""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""label_embs_concat""": """label_embeddings_concat""",
"""mask_emb""": """masked_spec_embed""",
"""spk_proj""": """speaker_proj""",
}
__lowerCamelCase = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""label_embeddings_concat""",
"""speaker_proj""",
"""layer_norm_for_extract""",
]
def UpperCamelCase ( __lowerCamelCase : Optional[int] , __lowerCamelCase : Any , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any ):
for attribute in key.split("." ):
snake_case : Tuple = getattr(__lowerCamelCase , __lowerCamelCase )
if weight_type is not None:
snake_case : int = getattr(__lowerCamelCase , __lowerCamelCase ).shape
else:
snake_case : Dict = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
snake_case : Dict = value
elif weight_type == "weight_g":
snake_case : Optional[int] = value
elif weight_type == "weight_v":
snake_case : Optional[int] = value
elif weight_type == "bias":
snake_case : Tuple = value
else:
snake_case : Optional[int] = value
logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def UpperCamelCase ( __lowerCamelCase : int , __lowerCamelCase : List[str] ):
snake_case : int = []
snake_case : List[Any] = fairseq_model.state_dict()
snake_case : int = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
snake_case : List[str] = False
if "conv_layers" in name:
load_conv_layer(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , hf_model.config.feat_extract_norm == "group" , )
snake_case : str = True
else:
for key, mapped_key in MAPPING.items():
snake_case : Tuple = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split("." )[:-1] ) != key):
# special case since naming is very similar
continue
snake_case : Tuple = True
if "*" in mapped_key:
snake_case : Union[str, Any] = name.split(__lowerCamelCase )[0].split("." )[-2]
snake_case : Any = mapped_key.replace("*" , __lowerCamelCase )
if "weight_g" in name:
snake_case : Optional[int] = "weight_g"
elif "weight_v" in name:
snake_case : Tuple = "weight_v"
elif "bias" in name:
snake_case : Dict = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
snake_case : str = "weight"
else:
snake_case : str = None
set_recursively(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
continue
if not is_used:
unused_weights.append(__lowerCamelCase )
logger.warning(f"""Unused weights: {unused_weights}""" )
def UpperCamelCase ( __lowerCamelCase : Any , __lowerCamelCase : Any , __lowerCamelCase : Tuple , __lowerCamelCase : List[str] , __lowerCamelCase : Any ):
snake_case : str = full_name.split("conv_layers." )[-1]
snake_case : int = name.split("." )
snake_case : Optional[int] = int(items[0] )
snake_case : Dict = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
snake_case : Union[str, Any] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
snake_case : List[str] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" )
snake_case : Dict = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" )
snake_case : Optional[Any] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__lowerCamelCase )
@torch.no_grad()
def UpperCamelCase ( __lowerCamelCase : Tuple , __lowerCamelCase : Dict , __lowerCamelCase : List[Any]=None , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Dict=True ):
if config_path is not None:
snake_case : str = UniSpeechSatConfig.from_pretrained(__lowerCamelCase )
else:
snake_case : str = UniSpeechSatConfig()
snake_case : Tuple = ""
if is_finetuned:
snake_case : Tuple = UniSpeechSatForCTC(__lowerCamelCase )
else:
snake_case : List[Any] = UniSpeechSatForPreTraining(__lowerCamelCase )
snake_case , snake_case , snake_case : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
snake_case : Dict = model[0].eval()
recursively_load_weights(__lowerCamelCase , __lowerCamelCase )
hf_wavavec.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
__lowerCamelCase = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 10 | 0 |
def UpperCamelCase ( __lowerCamelCase : List[Any] ):
    if not isinstance(__lowerCamelCase , list ):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]" )
    if len(__lowerCamelCase ) == 0:
        raise ValueError("Input list must be a non empty list" )
    if len(__lowerCamelCase ) == 1:
        return True
    snake_case : Any = series[1] - series[0]
    for index in range(len(__lowerCamelCase ) - 1 ):
if series[index + 1] - series[index] != common_diff:
return False
return True
def UpperCamelCase ( __lowerCamelCase : Any ):
    if not isinstance(__lowerCamelCase , list ):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]" )
    if len(__lowerCamelCase ) == 0:
        raise ValueError("Input list must be a non empty list" )
    snake_case : Any = 0
    for val in series:
        answer += val
    return answer / len(__lowerCamelCase )
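# Usage sketch with illustrative values: [2, 4, 6] has a constant difference of
# 2, so it is arithmetic, and its mean is 4.0 (the sample is assumed for the demo).
def _demo_arithmetic_series() -> None:
    sample = [2, 4, 6]
    diffs = {b - a for a, b in zip(sample , sample[1:] )}
    assert diffs == {2}  # constant difference -> arithmetic
    assert sum(sample ) / len(sample ) == 4.0  # matches the mean helper above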
if __name__ == "__main__":
import doctest
doctest.testmod()
| 371 |
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = """▁"""
__lowerCamelCase = {"""vocab_file""": """prophetnet.tokenizer"""}
__lowerCamelCase = {
"""vocab_file""": {
"""microsoft/xprophetnet-large-wiki100-cased""": (
"""https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"""
),
}
}
__lowerCamelCase = {
"""microsoft/xprophetnet-large-wiki100-cased""": {"""do_lower_case""": False},
}
__lowerCamelCase = {
"""microsoft/xprophetnet-large-wiki100-cased""": 5_12,
}
def UpperCamelCase ( __lowerCamelCase : Dict ):
snake_case : Dict = collections.OrderedDict()
with open(__lowerCamelCase , "r" , encoding="utf-8" ) as reader:
snake_case : Any = reader.readlines()
for index, token in enumerate(__lowerCamelCase ):
snake_case : List[Any] = token.rstrip("\n" )
snake_case : int = index
return vocab
class UpperCAmelCase ( A_ ):
A__ : Tuple = VOCAB_FILES_NAMES
A__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
A__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : int = ["input_ids", "attention_mask"]
def __init__(self : Any , snake_case__ : Dict , snake_case__ : List[Any]="[SEP]" , snake_case__ : Optional[int]="[SEP]" , snake_case__ : Union[str, Any]="[SEP]" , snake_case__ : List[Any]="[UNK]" , snake_case__ : List[str]="[PAD]" , snake_case__ : List[str]="[CLS]" , snake_case__ : List[Any]="[MASK]" , snake_case__ : Optional[Dict[str, Any]] = None , **snake_case__ : List[str] , ) -> None:
'''simple docstring'''
snake_case : Dict = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , cls_token=snake_case__ , mask_token=snake_case__ , sp_model_kwargs=self.sp_model_kwargs , **snake_case__ , )
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece" )
raise
snake_case : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(snake_case__ ) )
snake_case : Dict = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
snake_case : List[Any] = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
for i in range(10 ):
snake_case : Dict = f"""[unused{i}]"""
snake_case : List[str] = 5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
snake_case : Dict = 12
snake_case : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(snake_case__ )
def __getstate__(self : str ) -> Union[str, Any]:
'''simple docstring'''
snake_case : str = self.__dict__.copy()
snake_case : Tuple = None
return state
def __setstate__(self : str , snake_case__ : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
snake_case : Union[str, Any] = d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece" )
raise
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
snake_case : Dict = {}
snake_case : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
if token_ids_a is None:
return ([0] * len(snake_case__ )) + [1]
return ([0] * len(snake_case__ )) + [1] + ([0] * len(snake_case__ )) + [1]
def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
snake_case : List[str] = [self.sep_token_id]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0]
return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _SCREAMING_SNAKE_CASE (self : Any ) -> int:
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset
def _SCREAMING_SNAKE_CASE (self : int ) -> Any:
'''simple docstring'''
snake_case : List[str] = {self.convert_ids_to_tokens(snake_case__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : str ) -> str:
'''simple docstring'''
return self.sp_model.encode(snake_case__ , out_type=snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : Optional[int] ) -> Any:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
snake_case : Optional[Any] = self.sp_model.PieceToId(snake_case__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : Optional[int] ) -> int:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] , snake_case__ : Dict ) -> List[Any]:
'''simple docstring'''
snake_case : Dict = "".join(snake_case__ ).replace(snake_case__ , " " ).strip()
return out_string
def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : str , snake_case__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(snake_case__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case : Dict = os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case__ )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case__ , "wb" ) as fi:
snake_case : Tuple = self.sp_model.serialized_model_proto()
fi.write(snake_case__ )
return (out_vocab_file,)
def _SCREAMING_SNAKE_CASE (self : Any , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.sep_token_id]
snake_case : str = [self.sep_token_id]
return token_ids_a + sep + token_ids_a + sep
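# Layout sketch for the special-token handling above (ids are hypothetical):
# a single sequence A becomes A + [SEP]; a pair (A, B) becomes A + [SEP] + B + [SEP].
def _demo_special_token_layout() -> None:
    sep_id = 2  # assumed [SEP] id, for illustration only
    a, b = [5, 6], [7]  # hypothetical token ids
    assert a + [sep_id] == [5, 6, 2]
    assert a + [sep_id] + b + [sep_id] == [5, 6, 2, 7, 2]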
| 10 | 0 |
def UpperCamelCase ( __lowerCamelCase : Union[str, Any] ):
if n_term == "":
return []
snake_case : list = []
    for temp in range(int(__lowerCamelCase ) ):
series.append(f"""1/{temp + 1}""" if series else "1" )
return series
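# Expected shape of the output (illustrative): the first three harmonic terms
# are rendered as the strings "1", "1/2", "1/3".
def _demo_harmonic_terms() -> None:
    assert ["1"] + [f"""1/{k}""" for k in range(2 , 4 )] == ["1", "1/2", "1/3"]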
if __name__ == "__main__":
__lowerCamelCase = input("""Enter the last number (nth term) of the Harmonic Series""")
print("""Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n""")
print(harmonic_series(nth_term))
| 350 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = """▁"""
__lowerCamelCase = {"""vocab_file""": """sentencepiece.bpe.model"""}
__lowerCamelCase = {
"""vocab_file""": {
"""facebook/xglm-564M""": """https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model""",
}
}
__lowerCamelCase = {
"""facebook/xglm-564M""": 20_48,
}
class UpperCAmelCase ( A_ ):
A__ : Any = VOCAB_FILES_NAMES
A__ : Tuple = PRETRAINED_VOCAB_FILES_MAP
A__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : Optional[Any] = ["input_ids", "attention_mask"]
def __init__(self : str , snake_case__ : Optional[Any] , snake_case__ : List[str]="<s>" , snake_case__ : Tuple="</s>" , snake_case__ : Dict="</s>" , snake_case__ : Any="<s>" , snake_case__ : str="<unk>" , snake_case__ : str="<pad>" , snake_case__ : Optional[Dict[str, Any]] = None , **snake_case__ : Any , ) -> None:
'''simple docstring'''
snake_case : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
snake_case : Optional[int] = 7
snake_case : List[str] = [f"""<madeupword{i}>""" for i in range(self.num_madeup_words )]
snake_case : Union[str, Any] = kwargs.get("additional_special_tokens" , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , pad_token=snake_case__ , sp_model_kwargs=self.sp_model_kwargs , **snake_case__ , )
snake_case : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(snake_case__ ) )
snake_case : str = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
snake_case : int = 1
# Mimic fairseq token-to-id alignment for the first 4 token
snake_case : Any = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
snake_case : Tuple = len(self.sp_model )
snake_case : Any = {f"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(snake_case__ )
snake_case : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__(self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
snake_case : Union[str, Any] = self.__dict__.copy()
snake_case : str = None
snake_case : Union[str, Any] = self.sp_model.serialized_model_proto()
return state
def __setstate__(self : Dict , snake_case__ : Optional[Any] ) -> List[str]:
'''simple docstring'''
snake_case : int = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
snake_case : List[str] = {}
snake_case : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
snake_case : Tuple = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
if token_ids_a is None:
return [1] + ([0] * len(snake_case__ ))
return [1] + ([0] * len(snake_case__ )) + [1, 1] + ([0] * len(snake_case__ ))
def _SCREAMING_SNAKE_CASE (self : List[Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
snake_case : List[str] = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> List[Any]:
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def _SCREAMING_SNAKE_CASE (self : int ) -> Tuple:
'''simple docstring'''
snake_case : List[str] = {self.convert_ids_to_tokens(snake_case__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _SCREAMING_SNAKE_CASE (self : List[str] , snake_case__ : str ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(snake_case__ , out_type=snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
snake_case : List[Any] = self.sp_model.PieceToId(snake_case__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _SCREAMING_SNAKE_CASE (self : Optional[Any] , snake_case__ : str ) -> int:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : Tuple ) -> int:
'''simple docstring'''
snake_case : List[Any] = "".join(snake_case__ ).replace(snake_case__ , " " ).strip()
return out_string
def _SCREAMING_SNAKE_CASE (self : List[str] , snake_case__ : str , snake_case__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(snake_case__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case : Optional[Any] = os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case__ )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case__ , "wb" ) as fi:
snake_case : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(snake_case__ )
return (out_vocab_file,)
| 10 | 0 |
from __future__ import annotations
from typing import Generic, TypeVar
__lowerCamelCase = TypeVar("""T""")
class UpperCAmelCase ( Generic[T] ):
def __init__(self : str , snake_case__ : Dict ) -> Optional[Any]:
'''simple docstring'''
snake_case : str = data
snake_case : Optional[int] = self
snake_case : Union[str, Any] = 0
class UpperCAmelCase ( Generic[T] ):
def __init__(self : Optional[Any] ) -> int:
'''simple docstring'''
snake_case : Dict = {}
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] , snake_case__ : Tuple ) -> Tuple:
'''simple docstring'''
        snake_case : Union[str, Any] = DisjointSetTreeNode(snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] , snake_case__ : Dict ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Any = self.map[data]
if elem_ref != elem_ref.parent:
snake_case : List[str] = self.find_set(elem_ref.parent.data )
return elem_ref.parent
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] , snake_case__ : Dict , snake_case__ : Optional[Any] ) -> List[Any]:
'''simple docstring'''
if nodea.rank > nodea.rank:
snake_case : Dict = nodea
else:
snake_case : Tuple = nodea
if nodea.rank == nodea.rank:
nodea.rank += 1
def _SCREAMING_SNAKE_CASE (self : List[Any] , snake_case__ : Dict , snake_case__ : str ) -> List[str]:
'''simple docstring'''
        self.link(self.find_set(snake_case__ ) , self.find_set(snake_case__ ) )
class UpperCAmelCase ( Generic[T] ):
def __init__(self : str ) -> Any:
'''simple docstring'''
snake_case : int = {}
def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : Any ) -> Tuple:
'''simple docstring'''
if node not in self.connections:
snake_case : Any = {}
def _SCREAMING_SNAKE_CASE (self : Any , snake_case__ : Dict , snake_case__ : Any , snake_case__ : List[Any] ) -> Optional[Any]:
'''simple docstring'''
        self.add_node(snake_case__ )
        self.add_node(snake_case__ )
snake_case : Dict = weight
snake_case : Tuple = weight
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Dict:
'''simple docstring'''
snake_case : Any = []
snake_case : Union[str, Any] = set()
for start in self.connections:
for end in self.connections[start]:
if (start, end) not in seen:
seen.add((end, start) )
edges.append((start, end, self.connections[start][end]) )
        edges.sort(key=lambda x : x[2] )
# creating the disjoint set
snake_case : Optional[int] = DisjointSetTree[T]()
for node in self.connections:
            disjoint_set.make_set(snake_case__ )
# MST generation
snake_case : List[Any] = 0
snake_case : Optional[int] = 0
snake_case : Tuple = GraphUndirectedWeighted[T]()
while num_edges < len(self.connections ) - 1:
snake_case , snake_case , snake_case : Any = edges[index]
index += 1
            snake_case : Any = disjoint_set.find_set(snake_case__ )
            snake_case : int = disjoint_set.find_set(snake_case__ )
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(snake_case__ , snake_case__ , snake_case__ )
                disjoint_set.union(snake_case__ , snake_case__ )
return graph
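# Minimal Kruskal walk-through on a hypothetical 3-node triangle, mirroring the
# method above: sort edges by weight, then keep an edge only when its endpoints
# sit in different components of the disjoint set.
def _demo_kruskal_triangle() -> None:
    edges = sorted([("a", "b", 3), ("b", "c", 1), ("a", "c", 2)] , key=lambda e : e[2] )
    parent = {v: v for v in "abc"}

    def find(v ):  # iterative find with path halving
        while parent[v] != v:
            parent[v] = parent[parent[v]]
            v = parent[v]
        return v

    kept = []
    for u, v, w in edges:
        root_u, root_v = find(u ), find(v )
        if root_u != root_v:  # different components -> edge joins the MST
            parent[root_u] = root_v
            kept.append((u, v, w) )
    assert kept == [("b", "c", 1), ("a", "c", 2)]  # total MST weight: 3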
| 351 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
__lowerCamelCase = logging.get_logger(__name__)
class UpperCAmelCase ( A_ ):
A__ : int = ["pixel_values"]
def __init__(self : Tuple , snake_case__ : bool = True , snake_case__ : Union[int, float] = 1 / 2_55 , snake_case__ : bool = True , snake_case__ : int = 8 , **snake_case__ : Dict , ) -> None:
'''simple docstring'''
super().__init__(**snake_case__ )
snake_case : int = do_rescale
snake_case : List[str] = rescale_factor
snake_case : Optional[Any] = do_pad
snake_case : Dict = pad_size
def _SCREAMING_SNAKE_CASE (self : Dict , snake_case__ : np.ndarray , snake_case__ : float , snake_case__ : Optional[Union[str, ChannelDimension]] = None , **snake_case__ : List[str] ) -> np.ndarray:
'''simple docstring'''
return rescale(snake_case__ , scale=snake_case__ , data_format=snake_case__ , **snake_case__ )
def _SCREAMING_SNAKE_CASE (self : List[Any] , snake_case__ : np.ndarray , snake_case__ : int , snake_case__ : Optional[Union[str, ChannelDimension]] = None ) -> Dict:
'''simple docstring'''
snake_case , snake_case : Union[str, Any] = get_image_size(snake_case__ )
snake_case : str = (old_height // size + 1) * size - old_height
snake_case : List[str] = (old_width // size + 1) * size - old_width
return pad(snake_case__ , ((0, pad_height), (0, pad_width)) , mode="symmetric" , data_format=snake_case__ )
def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : ImageInput , snake_case__ : Optional[bool] = None , snake_case__ : Optional[float] = None , snake_case__ : Optional[bool] = None , snake_case__ : Optional[int] = None , snake_case__ : Optional[Union[str, TensorType]] = None , snake_case__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **snake_case__ : List[Any] , ) -> Tuple:
'''simple docstring'''
snake_case : str = do_rescale if do_rescale is not None else self.do_rescale
snake_case : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case : Optional[Any] = do_pad if do_pad is not None else self.do_pad
snake_case : Dict = pad_size if pad_size is not None else self.pad_size
snake_case : Union[str, Any] = make_list_of_images(snake_case__ )
if not valid_images(snake_case__ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
# All transformations expect numpy arrays.
snake_case : str = [to_numpy_array(snake_case__ ) for image in images]
if do_rescale:
snake_case : str = [self.rescale(image=snake_case__ , scale=snake_case__ ) for image in images]
if do_pad:
snake_case : List[Any] = [self.pad(snake_case__ , size=snake_case__ ) for image in images]
snake_case : Union[str, Any] = [to_channel_dimension_format(snake_case__ , snake_case__ ) for image in images]
snake_case : Optional[Any] = {"pixel_values": images}
return BatchFeature(data=snake_case__ , tensor_type=snake_case__ )
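# Numeric sketch of the symmetric padding rule above (sizes are illustrative):
# each side grows to the next multiple of pad_size strictly above the current
# extent, so even an already-aligned 512 gains one extra block of 8.
def _demo_pad_size_math() -> None:
    size = 8
    for old in (509, 512):  # hypothetical image heights
        pad = (old // size + 1) * size - old
        assert 0 < pad <= size and (old + pad) % size == 0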
| 10 | 0 |
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class UpperCAmelCase :
def __init__(self : int , snake_case__ : Union[str, Any] , snake_case__ : Tuple=13 , snake_case__ : int=7 , snake_case__ : Optional[int]=True , snake_case__ : Tuple=True , snake_case__ : List[str]=False , snake_case__ : Optional[int]=True , snake_case__ : Any=99 , snake_case__ : Any=64 , snake_case__ : int=5 , snake_case__ : Tuple=4 , snake_case__ : List[Any]=64 , snake_case__ : int="gelu" , snake_case__ : Any=0.1 , snake_case__ : Optional[int]=0.1 , snake_case__ : List[Any]=5_12 , snake_case__ : List[str]=16 , snake_case__ : List[Any]=2 , snake_case__ : Optional[int]=0.02 , snake_case__ : int=3 , snake_case__ : int=4 , snake_case__ : Any=None , ) -> Optional[int]:
'''simple docstring'''
snake_case : str = parent
snake_case : Optional[int] = batch_size
snake_case : Tuple = seq_length
snake_case : str = is_training
snake_case : Optional[Any] = use_input_mask
snake_case : Any = use_token_type_ids
snake_case : str = use_labels
snake_case : Any = vocab_size
snake_case : List[str] = hidden_size
snake_case : Union[str, Any] = num_hidden_layers
snake_case : Optional[int] = num_attention_heads
snake_case : Optional[Any] = intermediate_size
snake_case : Tuple = hidden_act
snake_case : int = hidden_dropout_prob
snake_case : Tuple = attention_probs_dropout_prob
snake_case : str = max_position_embeddings
snake_case : Tuple = type_vocab_size
snake_case : Optional[int] = type_sequence_label_size
snake_case : Union[str, Any] = initializer_range
snake_case : List[str] = num_labels
snake_case : Optional[int] = num_choices
snake_case : List[str] = scope
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
return MPNetConfig.from_pretrained("microsoft/mpnet-base" )
def _SCREAMING_SNAKE_CASE (self : str ) -> List[str]:
'''simple docstring'''
snake_case : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case : Union[str, Any] = None
if self.use_input_mask:
snake_case : Any = random_attention_mask([self.batch_size, self.seq_length] )
snake_case : Optional[int] = None
snake_case : str = None
snake_case : List[str] = None
if self.use_labels:
snake_case : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
snake_case : str = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _SCREAMING_SNAKE_CASE (self : str ) -> Union[str, Any]:
'''simple docstring'''
return MPNetConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def _SCREAMING_SNAKE_CASE (self : Any , snake_case__ : Optional[Any] , snake_case__ : Any , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : Any ) -> Dict:
'''simple docstring'''
snake_case : Optional[int] = MPNetModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
snake_case : Optional[int] = model(snake_case__ , snake_case__ )
snake_case : int = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _SCREAMING_SNAKE_CASE (self : Dict , snake_case__ : Any , snake_case__ : Optional[Any] , snake_case__ : int , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : Optional[Any] ) -> Any:
'''simple docstring'''
snake_case : Dict = MPNetForQuestionAnswering(config=snake_case__ )
model.to(snake_case__ )
model.eval()
snake_case : Dict = model(
snake_case__ , attention_mask=snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _SCREAMING_SNAKE_CASE (self : Dict , snake_case__ : Any , snake_case__ : Any , snake_case__ : int , snake_case__ : Tuple , snake_case__ : Optional[Any] , snake_case__ : int ) -> Dict:
'''simple docstring'''
snake_case : List[Any] = self.num_labels
snake_case : str = MPNetForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
snake_case : int = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _SCREAMING_SNAKE_CASE (self : Dict , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : Optional[int] , snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : Optional[Any] ) -> Tuple:
'''simple docstring'''
snake_case : Optional[int] = self.num_choices
snake_case : List[str] = MPNetForMultipleChoice(config=snake_case__ )
model.to(snake_case__ )
model.eval()
snake_case : Optional[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case : Optional[int] = model(
snake_case__ , attention_mask=snake_case__ , labels=snake_case__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _SCREAMING_SNAKE_CASE (self : Any , snake_case__ : List[str] , snake_case__ : str , snake_case__ : int , snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : List[Any] ) -> int:
'''simple docstring'''
snake_case : Optional[Any] = self.num_labels
snake_case : Union[str, Any] = MPNetForTokenClassification(config=snake_case__ )
model.to(snake_case__ )
model.eval()
snake_case : Tuple = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> Dict:
'''simple docstring'''
snake_case : Optional[int] = self.prepare_config_and_inputs()
((snake_case) , (snake_case) , (snake_case) , (snake_case) , (snake_case) , (snake_case)) : List[Any] = config_and_inputs
snake_case : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( snake_case_ ,snake_case_ ,unittest.TestCase ):
A__ : List[str] = (
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
A__ : Tuple = (
{
"feature-extraction": MPNetModel,
"fill-mask": MPNetForMaskedLM,
"question-answering": MPNetForQuestionAnswering,
"text-classification": MPNetForSequenceClassification,
"token-classification": MPNetForTokenClassification,
"zero-shot": MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
A__ : Optional[int] = False
A__ : Tuple = True
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
snake_case : int = MPNetModelTester(self )
snake_case : Tuple = ConfigTester(self , config_class=snake_case__ , hidden_size=37 )
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE (self : str ) -> List[Any]:
'''simple docstring'''
snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_model(*snake_case__ )
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> str:
'''simple docstring'''
snake_case : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_sequence_classification(*snake_case__ )
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> str:
'''simple docstring'''
snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_multiple_choice(*snake_case__ )
def _SCREAMING_SNAKE_CASE (self : int ) -> str:
'''simple docstring'''
snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_token_classification(*snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_question_answering(*snake_case__ )
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
@slow
def _SCREAMING_SNAKE_CASE (self : int ) -> Tuple:
'''simple docstring'''
snake_case : Any = MPNetModel.from_pretrained("microsoft/mpnet-base" )
snake_case : Dict = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
snake_case : Any = model(snake_case__ )[0]
snake_case : Optional[int] = torch.Size((1, 11, 7_68) )
self.assertEqual(output.shape , snake_case__ )
snake_case : Tuple = torch.tensor(
[[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]] )
# compare the actual values for a slice.
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case__ , atol=1e-4 ) )
| 352 |
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def UpperCamelCase ( __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int ):
# prepare kernel
# the kernel size have to be odd
if (ksize % 2) == 0:
snake_case : Tuple = ksize + 1
snake_case : int = np.zeros((ksize, ksize) , dtype=np.floataa )
# each value
for y in range(__lowerCamelCase ):
for x in range(__lowerCamelCase ):
# distance from center
snake_case : int = x - ksize // 2
snake_case : Union[str, Any] = y - ksize // 2
# degree to radiant
snake_case : List[str] = theta / 180 * np.pi
snake_case : List[Any] = np.cos(_theta )
snake_case : Dict = np.sin(_theta )
# get kernel x
snake_case : Optional[int] = cos_theta * px + sin_theta * py
# get kernel y
snake_case : str = -sin_theta * px + cos_theta * py
# fill kernel
snake_case : Any = np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
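# Sanity sketch (parameters are illustrative): even kernel sizes are bumped to
# the next odd value above, and theta = 0 leaves the axes unrotated.
def _demo_gabor_assumptions() -> None:
    ksize = 10  # hypothetical even size
    if ksize % 2 == 0:
        ksize = ksize + 1
    assert ksize == 11
    assert np.isclose(np.cos(0.0 ), 1.0 ) and np.isclose(np.sin(0.0 ), 0.0 )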
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
__lowerCamelCase = imread("""../image_data/lena.jpg""")
# turn image in gray scale value
__lowerCamelCase = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
__lowerCamelCase = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 1_20, 1_50]:
__lowerCamelCase = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
__lowerCamelCase = out / out.max() * 2_55
__lowerCamelCase = out.astype(np.uinta)
imshow("""Original""", gray)
imshow("""Gabor filter with 20x20 mask and 6 directions""", out)
waitKey(0)
| 10 | 0 |
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
__lowerCamelCase = logging.get_logger(__name__)
@add_end_docstrings(A_ )
class UpperCAmelCase ( A_ ):
def __init__(self : int , **snake_case__ : List[Any] ) -> List[str]:
'''simple docstring'''
        super().__init__(**snake_case__ )
requires_backends(self , "vision" )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__(self : List[str] , snake_case__ : Union[str, List[str], "Image", List["Image"]] , **snake_case__ : Any ) -> Dict:
'''simple docstring'''
        return super().__call__(snake_case__ , **snake_case__ )
def _SCREAMING_SNAKE_CASE (self : List[str] , **snake_case__ : List[str] ) -> Optional[Any]:
'''simple docstring'''
snake_case : List[Any] = {}
if "candidate_labels" in kwargs:
snake_case : Union[str, Any] = kwargs["candidate_labels"]
if "hypothesis_template" in kwargs:
snake_case : List[Any] = kwargs["hypothesis_template"]
return preprocess_params, {}, {}
def _SCREAMING_SNAKE_CASE (self : Any , snake_case__ : Optional[Any] , snake_case__ : str=None , snake_case__ : Optional[Any]="This is a photo of {}." ) -> Any:
'''simple docstring'''
        snake_case : Dict = load_image(snake_case__ )
        snake_case : Dict = self.image_processor(images=[image] , return_tensors=self.framework )
        snake_case : Tuple = candidate_labels
        snake_case : Dict = [hypothesis_template.format(x ) for x in candidate_labels]
        snake_case : Optional[Any] = self.tokenizer(snake_case__ , return_tensors=self.framework , padding=snake_case__ )
snake_case : Dict = [text_inputs]
return inputs
def _SCREAMING_SNAKE_CASE (self : List[str] , snake_case__ : str ) -> Optional[int]:
'''simple docstring'''
snake_case : Optional[Any] = model_inputs.pop("candidate_labels" )
snake_case : List[Any] = model_inputs.pop("text_inputs" )
        if isinstance(text_inputs[0] , UserDict ):
snake_case : List[Any] = text_inputs[0]
else:
# Batching case.
snake_case : Any = text_inputs[0][0]
        snake_case : List[str] = self.model(**snake_case__ , **snake_case__ )
snake_case : Optional[Any] = {
"candidate_labels": candidate_labels,
"logits": outputs.logits_per_image,
}
return model_outputs
def _SCREAMING_SNAKE_CASE (self : List[str] , snake_case__ : Optional[int] ) -> str:
'''simple docstring'''
snake_case : List[str] = model_outputs.pop("candidate_labels" )
snake_case : Tuple = model_outputs["logits"][0]
if self.framework == "pt":
snake_case : List[Any] = logits.softmax(dim=-1 ).squeeze(-1 )
snake_case : Optional[Any] = probs.tolist()
            if not isinstance(snake_case__ , list ):
snake_case : List[str] = [scores]
elif self.framework == "tf":
            snake_case : Any = stable_softmax(snake_case__ , axis=-1 )
snake_case : int = probs.numpy().tolist()
else:
raise ValueError(f"""Unsupported framework: {self.framework}""" )
        snake_case : Any = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(snake_case__ , snake_case__ ) , key=lambda x : -x[0] )
]
return result
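# Post-processing sketch (logits are made up): scores come from a softmax over
# the candidate labels and are returned sorted by descending score, as above.
def _demo_rank_candidates() -> None:
    import math
    logits = [2.0, 0.5]  # hypothetical logits
    exps = [math.exp(x ) for x in logits]
    scores = [e / sum(exps ) for e in exps]
    ranked = sorted(zip(scores , ["cat", "dog"] ) , key=lambda x : -x[0] )
    assert ranked[0][1] == "cat" and abs(sum(scores ) - 1.0 ) < 1e-9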
| 353 |
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class UpperCAmelCase :
def __init__(self : Dict , snake_case__ : Any , snake_case__ : Tuple=99 , snake_case__ : Tuple=13 , snake_case__ : int=16 , snake_case__ : Tuple=7 , snake_case__ : Union[str, Any]=True , snake_case__ : int=True , snake_case__ : List[Any]=True , snake_case__ : Optional[Any]=False , snake_case__ : Optional[int]=True , snake_case__ : Any=2 , snake_case__ : List[Any]=32 , snake_case__ : List[str]=4 , snake_case__ : List[str]=4 , snake_case__ : int=30 , snake_case__ : int=0 , snake_case__ : Tuple=1 , snake_case__ : Optional[Any]=2 , snake_case__ : int=None , ) -> List[Any]:
'''simple docstring'''
snake_case : Optional[Any] = parent
snake_case : Any = batch_size
snake_case : Any = decoder_seq_length
# For common tests
snake_case : Any = self.decoder_seq_length
snake_case : Optional[int] = is_training
snake_case : List[str] = use_attention_mask
snake_case : Tuple = use_labels
snake_case : int = vocab_size
snake_case : Any = d_model
snake_case : Dict = d_model
snake_case : List[str] = decoder_layers
snake_case : Union[str, Any] = decoder_layers
snake_case : int = decoder_ffn_dim
snake_case : List[Any] = decoder_attention_heads
snake_case : Dict = decoder_attention_heads
snake_case : Optional[int] = eos_token_id
snake_case : Dict = bos_token_id
snake_case : List[str] = pad_token_id
snake_case : int = decoder_start_token_id
snake_case : List[Any] = use_cache
snake_case : List[str] = max_position_embeddings
snake_case : Dict = None
snake_case : Union[str, Any] = decoder_seq_length
snake_case : Union[str, Any] = 2
snake_case : Union[str, Any] = 1
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Optional[Any]:
'''simple docstring'''
snake_case : Dict = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
snake_case : List[str] = None
if self.use_attention_mask:
snake_case : Optional[int] = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
snake_case : Union[str, Any] = None
if self.use_labels:
snake_case : List[str] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
snake_case : Union[str, Any] = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : str , snake_case__ : Union[str, Any] , ) -> str:
'''simple docstring'''
snake_case : Optional[int] = True
snake_case : List[Any] = TrOCRDecoder(config=snake_case__ ).to(snake_case__ ).eval()
snake_case : Dict = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
snake_case : List[str] = model(snake_case__ , use_cache=snake_case__ )
snake_case : Any = model(snake_case__ )
snake_case : Any = model(snake_case__ , use_cache=snake_case__ )
self.parent.assertTrue(len(snake_case__ ) == len(snake_case__ ) )
self.parent.assertTrue(len(snake_case__ ) == len(snake_case__ ) + 1 )
snake_case : List[Any] = outputs["past_key_values"]
# create hypothetical next token and extent to next_input_ids
snake_case : Optional[Any] = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
# append to next input_ids and
snake_case : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case : str = model(snake_case__ )["last_hidden_state"]
snake_case : str = model(snake_case__ , past_key_values=snake_case__ )["last_hidden_state"]
# select random slice
snake_case : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case : str = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
snake_case : Optional[Any] = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(snake_case__ , snake_case__ , atol=1e-3 )
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Tuple:
'''simple docstring'''
snake_case : List[Any] = self.prepare_config_and_inputs()
snake_case , snake_case , snake_case , snake_case : Dict = config_and_inputs
snake_case : List[Any] = {"input_ids": input_ids, "attention_mask": attention_mask}
return config, inputs_dict
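# Toy illustration of the cache check exercised above: a single incremental
# step must reproduce the corresponding position of a full forward pass (the
# tensors here are synthetic stand-ins, not real model outputs).
def _demo_incremental_slice() -> None:
    full = torch.arange(12.0 ).reshape(2 , 3 , 2 )  # pretend full-sequence output
    step = full[:, -1:, :]  # pretend single-step output with past_key_values
    assert torch.allclose(full[:, -1, 0] , step[:, 0, 0] , atol=1e-3 )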
@require_torch
class UpperCAmelCase ( A_ ,A_ ,A_ ,unittest.TestCase ):
A__ : int = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
A__ : Union[str, Any] = (TrOCRForCausalLM,) if is_torch_available() else ()
A__ : int = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
A__ : int = True
A__ : Optional[Any] = False
def _SCREAMING_SNAKE_CASE (self : Any ) -> Optional[Any]:
'''simple docstring'''
snake_case : Optional[Any] = TrOCRStandaloneDecoderModelTester(self , is_training=snake_case__ )
snake_case : int = ConfigTester(self , config_class=snake_case__ )
def _SCREAMING_SNAKE_CASE (self : int ) -> Union[str, Any]:
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> List[Any]:
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Optional[Any]:
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE (self : Dict ) -> List[str]:
'''simple docstring'''
snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Dict ) -> Any:
'''simple docstring'''
return
@unittest.skip("The model doesn't support left padding" ) # and it's not used enough to be worth fixing :)
def _SCREAMING_SNAKE_CASE (self : Any ) -> Any:
'''simple docstring'''
pass
| 10 | 0 |
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCamelCase ( __lowerCamelCase : str , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[Any] ):
    snake_case : Union[str, Any] = LxmertConfig.from_json_file(__lowerCamelCase )
    print(f"""Building PyTorch model from configuration: {config}""" )
    snake_case : Optional[int] = LxmertForPreTraining(__lowerCamelCase )
    # Load weights from the TensorFlow checkpoint
    load_tf_weights_in_lxmert(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
    # Save the PyTorch model
    print(f"""Save PyTorch model to {pytorch_dump_path}""" )
    torch.save(model.state_dict() , __lowerCamelCase )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__lowerCamelCase = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
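# A hedged example invocation of this converter from the shell; the script name
# and all file paths below are hypothetical placeholders:
#   python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./lxmert_tf/model.ckpt \
#     --config_file ./lxmert_tf/config.json \
#     --pytorch_dump_path ./lxmert_pytorch/pytorch_model.bin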
| 354 |
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ["text", "image", "audio"]
def create_inputs(input_types: List[str]):
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512)))
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"""Invalid type requested: {input_type}""")
    return inputs
def output_types(outputs: List):
    output_types = []
    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"""Invalid output: {output}""")
    return output_types
@is_tool_test
class UpperCAmelCase :
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> List[str]:
'''simple docstring'''
self.assertTrue(hasattr(self.tool , "inputs" ) )
self.assertTrue(hasattr(self.tool , "outputs" ) )
        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)
        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)
        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]
        self.assertListEqual(output_types(outputs), self.tool.outputs)
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> List[Any]:
'''simple docstring'''
self.assertTrue(hasattr(self.tool , "description" ) )
self.assertTrue(hasattr(self.tool , "default_checkpoint" ) )
self.assertTrue(self.tool.description.startswith("This is a tool that" ) )
def _SCREAMING_SNAKE_CASE (self : int ) -> Union[str, Any]:
'''simple docstring'''
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)
        if not isinstance(outputs, list):
            outputs = [outputs]
        self.assertEqual(len(outputs), len(self.tool.outputs))
        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
        inputs = create_inputs(self.tool.inputs)
        _inputs = []
        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))
        # Should not raise an error
        outputs = self.tool(*_inputs)
        if not isinstance(outputs, list):
            outputs = [outputs]
        self.assertEqual(len(outputs), len(self.tool.outputs))
| 10 | 0 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    destination_vertex: int
    weight: int
class AdjacencyList:
    def __init__(self, size: int) -> None:
        '''simple docstring'''
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size
    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        '''simple docstring'''
        return iter(self._graph[vertex])
    @property
    def size(self) -> int:
        '''simple docstring'''
        return self._size
    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        '''simple docstring'''
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))
    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        '''simple docstring'''
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0
        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue
            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)
        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")
        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
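# A minimal usage sketch of the 0-1 BFS above (toy graph, arbitrary vertex ids):
#   graph = AdjacencyList(3)
#   graph.add_edge(0, 1, 0)
#   graph.add_edge(1, 2, 1)
#   graph.get_shortest_path(0, 2)  # -> 1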
| 355 |
def UpperCamelCase(string_a: str, string_b: str) -> int:
    if len(string_a) != len(string_b):
        raise ValueError("String lengths must match!")
    count = 0
    for char_a, char_b in zip(string_a, string_b):
        if char_a != char_b:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
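# Quick sanity check for the corrected Hamming-distance function
# (example strings are arbitrary): the strings differ in three positions.
#   UpperCamelCase("karolin", "kathrin")  # -> 3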
| 10 | 0 |
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)
class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser) -> None:
        '''simple docstring'''
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models")
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be downloaded even if already in cache-dir")
        download_parser.add_argument(
            "--trust-remote-code", action="store_true", help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine", )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)
    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool) -> None:
        '''simple docstring'''
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code
    def run(self) -> None:
        '''simple docstring'''
        from ..models.auto import AutoModel, AutoTokenizer
        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code)
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code)
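# Assuming this command is registered on the standard `transformers-cli` entry
# point, it would be invoked roughly as (model name chosen for illustration):
#   transformers-cli download --cache-dir ./models bert-base-cased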
| 356 |
def UpperCamelCase(number: int):
    if not isinstance(number, int):
        raise TypeError("only integers accepted as input")
    else:
        digits = str(abs(number))
        num_transpositions = [list(digits) for _ in range(len(digits))]
        for index in range(len(digits)):
            num_transpositions[index].pop(index)
        return max(
            int("".join(transposition)) for transposition in num_transpositions)
if __name__ == "__main__":
__import__("""doctest""").testmod()
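# Worked example for the corrected function: for 269 the one-digit removals are
# 69, 29 and 26, so the maximum is 69:
#   UpperCamelCase(269)  # -> 69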
| 10 | 0 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    def __init__(self, components: Collection[float] | None = None) -> None:
        '''simple docstring'''
        if components is None:
            components = []
        self.__components = list(components)
    def __len__(self) -> int:
        '''simple docstring'''
        return len(self.__components)
    def __str__(self) -> str:
        '''simple docstring'''
        return "(" + ",".join(map(str, self.__components)) + ")"
    def __add__(self, other: Vector) -> Vector:
        '''simple docstring'''
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")
    def __sub__(self, other: Vector) -> Vector:
        '''simple docstring'''
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")
    @overload
    def __mul__(self, other: float) -> Vector:
        '''simple docstring'''
        ...
    @overload
    def __mul__(self, other: Vector) -> float:
        '''simple docstring'''
        ...
    def __mul__(self, other: float | Vector) -> float | Vector:
        '''simple docstring'''
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")
    def copy(self) -> Vector:
        '''simple docstring'''
        return Vector(self.__components)
    def component(self, i: int) -> float:
        '''simple docstring'''
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")
    def change_component(self, pos: int, value: float) -> None:
        '''simple docstring'''
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value
    def euclidean_length(self) -> float:
        '''simple docstring'''
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))
    def angle(self, other: Vector, deg: bool = False) -> float:
        '''simple docstring'''
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)
def zero_vector(dimension: int) -> Vector:
    assert isinstance(dimension, int)
    return Vector([0] * dimension)
def unit_basis_vector(dimension: int, pos: int) -> Vector:
    assert isinstance(dimension, int) and isinstance(pos, int)
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)
def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    assert isinstance(x, Vector) and isinstance(y, Vector) and isinstance(scalar, (int, float))
    return x * scalar + y
def random_vector(n: int, a: int, b: int) -> Vector:
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)
class Matrix:
    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        '''simple docstring'''
        self.__matrix = matrix
        self.__width = w
        self.__height = h
    def __str__(self) -> str:
        '''simple docstring'''
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans
    def __add__(self, other: Matrix) -> Matrix:
        '''simple docstring'''
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrix must have the same dimension!")
    def __sub__(self, other: Matrix) -> Matrix:
        '''simple docstring'''
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")
    @overload
    def __mul__(self, other: float) -> Matrix:
        '''simple docstring'''
        ...
    @overload
    def __mul__(self, other: Vector) -> Vector:
        '''simple docstring'''
        ...
    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        '''simple docstring'''
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!")
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None
    def height(self) -> int:
        '''simple docstring'''
        return self.__height
    def width(self) -> int:
        '''simple docstring'''
        return self.__width
    def component(self, x: int, y: int) -> float:
        '''simple docstring'''
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("change_component: indices out of bounds")
    def change_component(self, x: int, y: int, value: float) -> None:
        '''simple docstring'''
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")
    def minor(self, x: int, y: int) -> float:
        '''simple docstring'''
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()
    def cofactor(self, x: int, y: int) -> float:
        '''simple docstring'''
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")
    def determinant(self) -> float:
        '''simple docstring'''
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)
def square_zero_matrix(n: int) -> Matrix:
    ans = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)
def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    random.seed(None)
    matrix = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
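# A short sanity-check sketch for the linear-algebra classes above
# (values chosen by hand, results verified manually):
#   v = Vector([1, 2, 3])
#   w = Vector([4, 5, 6])
#   print(v + w)                      # (5,7,9)
#   print(v * w)                      # dot product -> 32
#   m = Matrix([[1, 0], [0, 1]], 2, 2)
#   print((m * 2.0).component(0, 0))  # -> 2.0
#   print(m.determinant())            # -> 1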
| 357 |
import requests
from bs4 import BeautifulSoup
def stock_price(symbol: str = "AAPL") -> str:
    url = f"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F'Current {symbol:<4} stock price is {stock_price(symbol):>8}')
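# Note: this scrapes live Yahoo Finance markup. The CSS class targeted above is
# brittle, so the call may raise AttributeError whenever Yahoo changes its page
# layout; treat any returned value as a point-in-time quote only.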
| 10 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/mbart-large-en-ro': (
'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
),
'facebook/mbart-large-cc25': (
'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/mbart-large-en-ro': 10_24,
'facebook/mbart-large-cc25': 10_24,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
class UpperCAmelCase ( PreTrainedTokenizer ):
A__ : int = VOCAB_FILES_NAMES
A__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
A__ : List[Any] = ["input_ids", "attention_mask"]
A__ : List[str] = []
A__ : Tuple = []
def __init__(self : List[str] , snake_case__ : List[Any] , snake_case__ : Union[str, Any]="<s>" , snake_case__ : Union[str, Any]="</s>" , snake_case__ : List[Any]="</s>" , snake_case__ : str="<s>" , snake_case__ : Tuple="<unk>" , snake_case__ : str="<pad>" , snake_case__ : List[str]="<mask>" , snake_case__ : Any=None , snake_case__ : int=None , snake_case__ : Tuple=None , snake_case__ : Optional[Dict[str, Any]] = None , snake_case__ : List[str]=None , **snake_case__ : Tuple , ) -> int:
'''simple docstring'''
snake_case : Union[str, Any] = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else mask_token
snake_case : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , tokenizer_file=snake_case__ , src_lang=snake_case__ , tgt_lang=snake_case__ , additional_special_tokens=snake_case__ , sp_model_kwargs=self.sp_model_kwargs , **snake_case__ , )
snake_case : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(snake_case__ ) )
snake_case : int = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
snake_case : Any = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
snake_case : Any = 1
snake_case : Dict = len(self.sp_model )
snake_case : Tuple = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
}
snake_case : int = {v: k for k, v in self.lang_code_to_id.items()}
snake_case : int = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
snake_case : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
snake_case : Optional[Any] = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
        self._src_lang = src_lang if src_lang is not None else 'en_XX'
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__(self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Optional[Any] = self.__dict__.copy()
snake_case : Tuple = None
snake_case : Union[str, Any] = self.sp_model.serialized_model_proto()
return state
def __setstate__(self : Any , snake_case__ : Dict ) -> Optional[int]:
'''simple docstring'''
snake_case : Any = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
snake_case : List[str] = {}
snake_case : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> Dict:
'''simple docstring'''
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
    def src_lang(self) -> str:
        '''simple docstring'''
        return self._src_lang
@src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        '''simple docstring'''
        self._src_lang = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
snake_case : Optional[int] = [1] * len(self.prefix_tokens )
snake_case : Optional[int] = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(snake_case__ )) + suffix_ones
return prefix_ones + ([0] * len(snake_case__ )) + ([0] * len(snake_case__ )) + suffix_ones
def _SCREAMING_SNAKE_CASE (self : Any , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
snake_case : Optional[Any] = [self.sep_token_id]
snake_case : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : Optional[int] , snake_case__ : str , snake_case__ : Optional[str] , snake_case__ : Optional[str] , **snake_case__ : str ) -> List[Any]:
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
snake_case : Optional[Any] = src_lang
snake_case : int = self(snake_case__ , add_special_tokens=snake_case__ , return_tensors=snake_case__ , **snake_case__ )
snake_case : Any = self.convert_tokens_to_ids(snake_case__ )
snake_case : Optional[int] = tgt_lang_id
return inputs
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> List[Any]:
'''simple docstring'''
snake_case : List[str] = {self.convert_ids_to_tokens(snake_case__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] , snake_case__ : str ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(snake_case__ , out_type=snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] , snake_case__ : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
snake_case : Union[str, Any] = self.sp_model.PieceToId(snake_case__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : Tuple ) -> int:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _SCREAMING_SNAKE_CASE (self : int , snake_case__ : Tuple ) -> Optional[int]:
'''simple docstring'''
        out_string = "".join(snake_case__ ).replace(SPIECE_UNDERLINE, " ").strip()
return out_string
def _SCREAMING_SNAKE_CASE (self : List[Any] , snake_case__ : str , snake_case__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(snake_case__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case : Union[str, Any] = os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case__ )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case__ , "wb" ) as fi:
snake_case : List[Any] = self.sp_model.serialized_model_proto()
fi.write(snake_case__ )
return (out_vocab_file,)
def _SCREAMING_SNAKE_CASE (self : List[Any] , snake_case__ : List[str] , snake_case__ : str = "en_XX" , snake_case__ : Optional[List[str]] = None , snake_case__ : str = "ro_RO" , **snake_case__ : Dict , ) -> BatchEncoding:
'''simple docstring'''
snake_case : int = src_lang
snake_case : List[Any] = tgt_lang
return super().prepare_seqaseq_batch(snake_case__ , snake_case__ , **snake_case__ )
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> str:
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def _SCREAMING_SNAKE_CASE (self : Any ) -> Dict:
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        '''simple docstring'''
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        '''simple docstring'''
        self.cur_lang_code = self.lang_code_to_id[lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
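# A hedged usage sketch, assuming the class above is exposed under its upstream
# name MBartTokenizer (here it is defined as UpperCAmelCase):
#   tokenizer = MBartTokenizer.from_pretrained(
#       "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
#   batch = tokenizer("UN Chief Says There Is No Plan", return_tensors="pt")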
| 358 |
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + """/test_data/fsmt/fsmt_val_data.json"""
with io.open(filename, """r""", encoding="""utf-8""") as f:
    bleu_data = json.load(f)
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
    def get_tokenizer(self, mname):
        '''simple docstring'''
        return FSMTTokenizer.from_pretrained(mname)
    def get_model(self, mname):
        '''simple docstring'''
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model
@parameterized.expand(
[
["en-ru", 26.0],
["ru-en", 22.0],
["en-de", 22.0],
["de-en", 29.0],
] )
@slow
    def test_bleu_scores(self, pair, min_bleu_score):
        '''simple docstring'''
        mname = f"""facebook/wmt19-{pair}"""
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)
        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]
        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids, num_beams=8, )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
| 10 | 0 |
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
"""config""": [
"""EXTERNAL_DATA_FORMAT_SIZE_LIMIT""",
"""OnnxConfig""",
"""OnnxConfigWithPast""",
"""OnnxSeq2SeqConfigWithPast""",
"""PatchingSpec""",
],
"""convert""": ["""export""", """validate_model_outputs"""],
"""features""": ["""FeaturesManager"""],
"""utils""": ["""ParameterFormat""", """compute_serialized_parameters_size"""],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 359 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
__lowerCamelCase = {
"""return_dict""": False,
"""output_hidden_states""": True,
"""output_attentions""": True,
"""torchscript""": True,
"""torch_dtype""": """float16""",
"""use_bfloat16""": True,
"""tf_legacy_loss""": True,
"""pruned_heads""": {"""a""": 1},
"""tie_word_embeddings""": False,
"""is_decoder""": True,
"""cross_attention_hidden_size""": 1_28,
"""add_cross_attention""": True,
"""tie_encoder_decoder""": True,
"""max_length""": 50,
"""min_length""": 3,
"""do_sample""": True,
"""early_stopping""": True,
"""num_beams""": 3,
"""num_beam_groups""": 3,
"""diversity_penalty""": 0.5,
"""temperature""": 2.0,
"""top_k""": 10,
"""top_p""": 0.7,
"""typical_p""": 0.2,
"""repetition_penalty""": 0.8,
"""length_penalty""": 0.8,
"""no_repeat_ngram_size""": 5,
"""encoder_no_repeat_ngram_size""": 5,
"""bad_words_ids""": [1, 2, 3],
"""num_return_sequences""": 3,
"""chunk_size_feed_forward""": 5,
"""output_scores""": True,
"""return_dict_in_generate""": True,
"""forced_bos_token_id""": 2,
"""forced_eos_token_id""": 3,
"""remove_invalid_values""": True,
"""architectures""": ["""BertModel"""],
"""finetuning_task""": """translation""",
"""id2label""": {0: """label"""},
"""label2id""": {"""label""": """0"""},
"""tokenizer_class""": """BertTokenizerFast""",
"""prefix""": """prefix""",
"""bos_token_id""": 6,
"""pad_token_id""": 7,
"""eos_token_id""": 8,
"""sep_token_id""": 9,
"""decoder_start_token_id""": 10,
"""exponential_decay_length_penalty""": (5, 1.01),
"""suppress_tokens""": [0, 1],
"""begin_suppress_tokens""": 2,
"""task_specific_params""": {"""translation""": """some_params"""},
"""problem_type""": """regression""",
}
@is_staging_test
class UpperCAmelCase ( unittest.TestCase ):
@classmethod
def _SCREAMING_SNAKE_CASE (cls : Dict ) -> Optional[int]:
'''simple docstring'''
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
@classmethod
def _SCREAMING_SNAKE_CASE (cls : Dict ) -> Union[str, Any]:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id="test-config" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-config-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-config" )
except HTTPError:
pass
def _SCREAMING_SNAKE_CASE (self : str ) -> List[str]:
'''simple docstring'''
snake_case : Union[str, Any] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("test-config" , use_auth_token=self._token )
snake_case : Union[str, Any] = BertConfig.from_pretrained(f"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(snake_case__ , getattr(snake_case__ , snake_case__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-config" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(snake_case__ , repo_id="test-config" , push_to_hub=snake_case__ , use_auth_token=self._token )
snake_case : Any = BertConfig.from_pretrained(f"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(snake_case__ , getattr(snake_case__ , snake_case__ ) )
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Dict:
'''simple docstring'''
snake_case : List[Any] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("valid_org/test-config-org" , use_auth_token=self._token )
snake_case : Optional[int] = BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(snake_case__ , getattr(snake_case__ , snake_case__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-config-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
snake_case__ , repo_id="valid_org/test-config-org" , push_to_hub=snake_case__ , use_auth_token=self._token )
snake_case : str = BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(snake_case__ , getattr(snake_case__ , snake_case__ ) )
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Dict:
'''simple docstring'''
CustomConfig.register_for_auto_class()
snake_case : Union[str, Any] = CustomConfig(attribute=42 )
config.push_to_hub("test-dynamic-config" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {"AutoConfig": "custom_configuration.CustomConfig"} )
snake_case : int = AutoConfig.from_pretrained(f"""{USER}/test-dynamic-config""" , trust_remote_code=snake_case__ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , "CustomConfig" )
self.assertEqual(new_config.attribute , 42 )
class UpperCAmelCase ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Dict:
'''simple docstring'''
snake_case : Any = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
snake_case : Tuple = c.n_embd + 1 # int
snake_case : str = c.resid_pdrop + 1.0 # float
snake_case : Optional[Any] = not c.scale_attn_weights # bool
snake_case : Optional[int] = c.summary_type + "foo" # str
c.update_from_string(
f"""n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}""" )
self.assertEqual(snake_case__ , c.n_embd , "mismatch for key: n_embd" )
self.assertEqual(snake_case__ , c.resid_pdrop , "mismatch for key: resid_pdrop" )
self.assertEqual(snake_case__ , c.scale_attn_weights , "mismatch for key: scale_attn_weights" )
self.assertEqual(snake_case__ , c.summary_type , "mismatch for key: summary_type" )
def _SCREAMING_SNAKE_CASE (self : int ) -> List[str]:
'''simple docstring'''
snake_case : Tuple = PretrainedConfig()
snake_case : List[str] = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
snake_case__ , ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"] )
snake_case : Dict = [key for key, value in config_common_kwargs.items() if value == getattr(snake_case__ , snake_case__ )]
if len(snake_case__ ) > 0:
raise ValueError(
"The following keys are set with the default values in"
" `test_configuration_common.config_common_kwargs` pick another value for them:"
f""" {', '.join(snake_case__ )}.""" )
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
with self.assertRaises(snake_case__ ):
# config is in subfolder, the following should not work without specifying the subfolder
snake_case : Optional[Any] = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" )
snake_case : Optional[Any] = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" , subfolder="bert" )
self.assertIsNotNone(snake_case__ )
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Optional[Any]:
'''simple docstring'''
snake_case : Tuple = mock.Mock()
snake_case : Optional[int] = 5_00
snake_case : Any = {}
snake_case : str = HTTPError
snake_case : Tuple = {}
# Download this model to make sure it's in the cache.
snake_case : List[Any] = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=snake_case__ ) as mock_head:
snake_case : List[str] = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
# This check we did call the fake head request
mock_head.assert_called()
def _SCREAMING_SNAKE_CASE (self : Any ) -> List[Any]:
'''simple docstring'''
snake_case : Dict = BertConfig.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json" )
def _SCREAMING_SNAKE_CASE (self : int ) -> str:
'''simple docstring'''
snake_case : Optional[Any] = AutoConfig.from_pretrained("bert-base-cased" )
snake_case : int = ["config.4.0.0.json"]
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(snake_case__ )
snake_case : str = 2
json.dump(configuration.to_dict() , open(os.path.join(snake_case__ , "config.4.0.0.json" ) , "w" ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
snake_case : str = AutoConfig.from_pretrained(snake_case__ )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
snake_case : List[str] = ["config.42.0.0.json"]
snake_case : Optional[int] = 7_68
configuration.save_pretrained(snake_case__ )
shutil.move(os.path.join(snake_case__ , "config.4.0.0.json" ) , os.path.join(snake_case__ , "config.42.0.0.json" ) )
snake_case : Union[str, Any] = AutoConfig.from_pretrained(snake_case__ )
self.assertEqual(new_configuration.hidden_size , 7_68 )
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Tuple:
'''simple docstring'''
snake_case : List[Any] = "hf-internal-testing/test-two-configs"
import transformers as new_transformers
snake_case : Optional[int] = "v4.0.0"
snake_case , snake_case : List[str] = new_transformers.models.auto.AutoConfig.from_pretrained(
snake_case__ , return_unused_kwargs=snake_case__ )
self.assertEqual(new_configuration.hidden_size , 2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(snake_case__ , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
snake_case : int = "v3.0.0"
snake_case : int = old_transformers.models.auto.AutoConfig.from_pretrained(snake_case__ )
self.assertEqual(old_configuration.hidden_size , 7_68 )
| 10 | 0 |
from math import factorial
def solution(num: int = 100) -> int:
    return sum(int(x) for x in str(factorial(num)))
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip())))
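# Example: solution(100) returns 648, the digit sum of 100! (Project Euler 20);
# a smaller hand check: 10! = 3628800, whose digits sum to 27.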
| 360 |
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8
KEYMAP = {
"""tab""": ord("""\t"""),
"""newline""": ord("""\r"""),
"""esc""": 27,
"""up""": 65 + ARROW_KEY_FLAG,
"""down""": 66 + ARROW_KEY_FLAG,
"""right""": 67 + ARROW_KEY_FLAG,
"""left""": 68 + ARROW_KEY_FLAG,
"""mod_int""": 91,
"""undefined""": sys.maxsize,
"""interrupt""": 3,
"""insert""": 50,
"""delete""": 51,
"""pg_up""": 53,
"""pg_down""": 54,
}
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
B"""\xe0H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
B"""\x00H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
B"""\xe0P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
B"""\x00P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
B"""\xe0M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
B"""\x00M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
B"""\xe0K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
B"""\x00K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
}
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    if os.name == "nt":
        import msvcrt
        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty
        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def get_character():
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
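# Minimal interactive sketch (requires a real TTY; reads a single keypress):
#   pressed = get_character()
#   print("you pressed:", pressed)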
| 10 | 0 |
"""simple docstring"""
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
__lowerCamelCase = {
"iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
"iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
"iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
"mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
"mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
"mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
"mask_downscaling.0": "mask_embed.conv1",
"mask_downscaling.1": "mask_embed.layer_norm1",
"mask_downscaling.3": "mask_embed.conv2",
"mask_downscaling.4": "mask_embed.layer_norm2",
"mask_downscaling.6": "mask_embed.conv3",
"point_embeddings": "point_embed",
"pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
"image_encoder": "vision_encoder",
"neck.0": "neck.conv1",
"neck.1": "neck.layer_norm1",
"neck.2": "neck.conv2",
"neck.3": "neck.layer_norm2",
"patch_embed.proj": "patch_embed.projection",
".norm": ".layer_norm",
"blocks": "layers",
}
def replace_keys(state_dict):
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)
    output_hypernetworks_mlps_pattern = r""".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"""
    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)
        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")
        model_state_dict[key] = value
    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]
    return model_state_dict
def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    checkpoint_path = hf_hub_download(model_hub_id, f"""checkpoints/{model_name}.pth""")
    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, global_attn_indexes=[5, 11, 17, 23], )
        config = SamConfig(
            vision_config=vision_config, )
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1280, num_hidden_layers=32, num_attention_heads=16, global_attn_indexes=[7, 15, 23, 31], )
        config = SamConfig(
            vision_config=vision_config, )
    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)
    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")
    img_url = """https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"""
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
    input_points = [[[400, 650]]]
    input_labels = [[1]]
    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()
    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579_8902_5115_9668
        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt").to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9712_6030_9219_3604
        input_boxes = ((75, 275, 1725, 850),)
        inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.8686_0156_0592_6514
        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]
        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt").to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9936_0477_9243_4692
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
__lowerCamelCase = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
parser.add_argument(
"""--model_name""",
default="""sam_vit_h_4b8939""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
parser.add_argument(
"""--model_hub_id""",
default="""ybelkada/segment-anything""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
__lowerCamelCase = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 361 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_dpt"] = ["""DPTFeatureExtractor"""]
    _import_structure["image_processing_dpt"] = ["""DPTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dpt"] = [
"""DPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DPTForDepthEstimation""",
"""DPTForSemanticSegmentation""",
"""DPTModel""",
"""DPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 10 | 0 |
import numpy as np
import datasets
__lowerCamelCase = '\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n'
__lowerCamelCase = '\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n'
__lowerCamelCase = '\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class UpperCAmelCase ( datasets.Metric ):
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"X": datasets.Sequence(datasets.Value("float" , id="sequence" ) , id="X" ),
} ) , )
    def _compute(self, X, reference_distribution):
        '''simple docstring'''
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)
# Assert that arrays are 2D
if len(X.shape ) != 2:
raise ValueError("Expected `X` to be a 2D vector" )
if len(reference_distribution.shape ) != 2:
raise ValueError("Expected `reference_distribution` to be a 2D vector" )
if reference_distribution.shape[0] < 2:
raise ValueError(
"Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension" )
        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()
        return {"mahalanobis": mahal_dist}
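# A NumPy-only sketch of the computation `_compute` performs, reproducing the
# docstring example (X=[[0, 1]] against reference [[0, 1], [1, 0]] gives 0.5):
#   delta = np.array([[0, 1]]) - np.mean(np.array([[0, 1], [1, 0]]))
#   inv_cov = np.linalg.pinv(np.cov(np.array([[0, 1], [1, 0]]).T))
#   np.dot(np.dot(delta, inv_cov), delta.T).diagonal()  # -> array([0.5])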
| 362 |
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
__lowerCamelCase = logging.get_logger(__name__)
class UpperCAmelCase ( A_ ):
def __init__(self : List[Any] , *snake_case__ : List[str] , **snake_case__ : Dict ) -> None:
'''simple docstring'''
warnings.warn(
"The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use PerceiverImageProcessor instead." , snake_case__ , )
super().__init__(*snake_case__ , **snake_case__ )
| 10 | 0 |
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
0: 'Sunday',
1: 'Monday',
2: 'Tuesday',
3: 'Wednesday',
4: 'Thursday',
5: 'Friday',
6: 'Saturday',
}
def UpperCamelCase ( year : int , month : int , day : int ) -> str:
    """Returns the week-day name for a given date using the Doomsday algorithm.

    >>> UpperCamelCase(2020, 10, 24)
    'Saturday'
    """
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
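# --- Illustrative sketch (not part of the original file) ---
# A hedged cross-check of the Doomsday implementation above against the
# standard library; `calendar` is authoritative here, the name mapping is ours.
import calendar

def _stdlib_week_day(year: int, month: int, day: int) -> str:
    # calendar.weekday returns 0 for Monday; translate to this file's names.
    names = ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]
    return names[calendar.weekday(year, month, day)]

# Both should report 'Saturday' for 2020-10-24:
# assert UpperCamelCase(2020, 10, 24) == _stdlib_week_day(2020, 10, 24)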
| 363 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_pix2struct": [
        "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Pix2StructConfig",
        "Pix2StructTextConfig",
        "Pix2StructVisionConfig",
    ],
    "processing_pix2struct": ["Pix2StructProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ["""Pix2StructImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pix2struct"] = [
        "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Pix2StructPreTrainedModel",
        "Pix2StructForConditionalGeneration",
        "Pix2StructVisionModel",
        "Pix2StructTextModel",
    ]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_pix2struct import Pix2StructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
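# --- Illustrative sketch (not part of the original file) ---
# How the lazy init above behaves in practice, assuming the pix2struct
# submodules are importable: attribute access triggers the real import.
#
#   from transformers.models import pix2struct
#   config = pix2struct.Pix2StructConfig()  # configuration_pix2struct loads here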
| 10 | 0 |
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class UpperCAmelCase ( TestCase ):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        list_records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(list_records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
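# --- Illustrative sketch (not part of the original file) ---
# Minimal standalone use of the API under test above.
# from datasets import Dataset
#
# records = [{"col_1": 3, "col_2": "a"}, {"col_1": 2, "col_2": "b"}]
# dset = Dataset.from_list(records)
# print(dset.column_names)  # ['col_1', 'col_2']
# print(dset[0])            # {'col_1': 3, 'col_2': 'a'}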
| 364 |
def UpperCamelCase ( input_string : str ) -> str:
    """Returns the longest palindromic substring of ``input_string``
    using Manacher's algorithm in O(n) time.

    >>> UpperCamelCase('abbbaba')
    'abbba'
    >>> UpperCamelCase('ababa')
    'ababa'
    """
    max_length = 0
    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ""
    output_string = ""
    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]
    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741
    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]
    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1
        length[j] = 2 * k - 1
        # does this palindrome end after the previously explored end (that is r)?
        # if yes, update r to the last index of this palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1
        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j
    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i
    return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
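# --- Illustrative sketch (not part of the original file) ---
# A quadratic brute-force reference for cross-checking the Manacher
# implementation above on short strings; the longest palindrome need not be
# unique, so only lengths are compared. The helper name is hypothetical.
def _longest_palindrome_bruteforce(s: str) -> str:
    best = s[:1]
    for i in range(len(s)):
        for j in range(i + len(best) + 1, len(s) + 1):
            candidate = s[i:j]
            if candidate == candidate[::-1] and len(candidate) > len(best):
                best = candidate
    return best

# assert len(UpperCamelCase("abbbaba")) == len(_longest_palindrome_bruteforce("abbbaba"))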
| 10 | 0 |
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config
def recursive_print(name, val, spaces=0):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)

    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
    # for compatibility with later versions of NVIDIA Megatron-LM.
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
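# --- Illustrative sketch (not part of the original file) ---
# What the reordering above does on a toy tensor: a checkpoint_version 1.0
# QKV weight stored as [num_heads * hidden_size * num_splits, :] is permuted
# to [num_splits * num_heads * hidden_size, :]. Shapes below are hypothetical.
#
#   toy = torch.arange(2 * 4 * 3 * 8, dtype=torch.float32).view(2 * 4 * 3, 8)
#   out = fix_query_key_value_ordering(toy, 1.0, 3, 2, 4)  # num_splits=3, num_heads=2, hidden_size=4
#   assert out.shape == toy.shape  # same shape, rows permuted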
def convert_megatron_checkpoint(args, input_state_dict, config):
    # The converted output model.
    output_state_dict = {}

    # old versions did not store training args
    ds_args = input_state_dict.get("args", None)
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        # from pprint import pprint
        # pprint(vars(ds_args))

        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size
        # pprint(config)

    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0

    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]

    # The word embeddings.
    word_embeddings = embeddings["word_embeddings"]["weight"]
    # Truncate the embedding table to vocab_size rows.
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict["transformer.wte.weight"] = word_embeddings

    # The position embeddings.
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0)
    if n_positions != config.n_positions:
        raise ValueError(
            f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match"
        )
    # Store the position embeddings.
    output_state_dict["transformer.wpe.weight"] = pos_embeddings

    # The transformer.
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]

    # The regex to extract layer names.
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")

    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        "attention.dense": ".attn.c_proj.",
        "self_attention.dense": ".attn.c_proj.",
        "mlp.dense_h_to_4h": ".mlp.c_fc.",
        "mlp.dense_4h_to_h": ".mlp.c_proj.",
    }

    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key)

        # Stop if that's not a layer
        if m is None:
            break

        # The index of the layer.
        layer_idx = int(m.group(1))
        # The name of the operation.
        op_name = m.group(2)
        # Is it a weight or a bias?
        weight_or_bias = m.group(3)

        # The name of the layer.
        layer_name = f"transformer.h.{layer_idx}"

        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm"):
            ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val

        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view(
                1, 1, n_positions, n_positions
            )
            output_state_dict[layer_name + ".attn.bias"] = causal_mask

            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4, dtype=torch.float16)
            output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias

            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0, 1).contiguous()
            # Store.
            output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val

        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Store. No change of shape.
            output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val

        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1)

        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "bias"] = val

    # DEBUG.
    assert config.n_layer == layer_idx + 1

    # The final layernorm.
    output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]

    # For LM head, transformers' wants the matrix to weight embeddings.
    output_state_dict["lm_head.weight"] = word_embeddings

    # It should be done!
    return output_state_dict
def main():
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure", action="store_true")
    parser.add_argument(
        "path_to_checkpoint",
        type=str,
        help="Path to the checkpoint file (.zip archive or direct .pt file)",
    )
    parser.add_argument(
        "--config_file",
        default="",
        type=str,
        help="An optional config json file describing the pre-trained model.",
    )
    args = parser.parse_args()

    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint)

    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}")
    if args.path_to_checkpoint.endswith(".zip"):
        with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
            with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location="cpu")
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")

    ds_args = input_state_dict.get("args", None)

    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = "gelu_fast"
            elif ds_args.openai_gelu:
                activation_function = "gelu_new"
            else:
                activation_function = "gelu"
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = "gelu_new"

        # Spell out all parameters in case the defaults change.
        config = GPT2Config(
            vocab_size=50257,
            n_positions=1024,
            n_embd=1024,
            n_layer=24,
            n_head=16,
            n_inner=4096,
            activation_function=activation_function,
            resid_pdrop=0.1,
            embd_pdrop=0.1,
            attn_pdrop=0.1,
            layer_norm_epsilon=1e-5,
            initializer_range=0.02,
            summary_type="cls_index",
            summary_use_proj=True,
            summary_activation=None,
            summary_proj_to_labels=True,
            summary_first_dropout=0.1,
            scale_attn_weights=True,
            use_cache=True,
            bos_token_id=50256,
            eos_token_id=50256,
        )
    else:
        config = GPT2Config.from_json_file(args.config_file)

    config.architectures = ["GPT2LMHeadModel"]

    # Convert.
    print("Converting")
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)

    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)

    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}")
    else:
        tokenizer_model_name = "gpt2"

    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class

    # Store the config to file.
    print("Saving config")
    config.save_pretrained(basename)

    # Save tokenizer based on args
    print(f"Adding {tokenizer_class} tokenizer files")
    tokenizer.save_pretrained(basename)

    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
    print(f'Saving checkpoint to "{output_checkpoint_file}"')
    torch.save(output_state_dict, output_checkpoint_file)
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
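# --- Illustrative sketch (not part of the original file) ---
# A hedged example invocation; the checkpoint path below is hypothetical:
#
#   python convert_megatron_gpt2_checkpoint.py \
#       --print-checkpoint-structure \
#       /tmp/megatron_gpt2/checkpoint.zip
#
# The converted config.json, tokenizer files, and pytorch_model.bin are
# written next to the input checkpoint.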
| 365 |
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any]  # Is a nested dict.
PICO_TO_ANGSTROM = 0.01
@dataclasses.dataclass(frozen=True)
class Protein:
    """Protein structure representation."""

    # Cartesian coordinates of atoms in angstroms.
    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]

    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]

    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]

    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]

    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]

    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None

    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None

    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None

    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None
def from_proteinnet_string(proteinnet_str: str) -> Protein:
    tag_re = r"(\[[A-Z]+\]\n)"
    tags = [tag.strip() for tag in re.split(tag_re, proteinnet_str) if len(tag) > 0]
    groups: Iterator[Tuple[str, List[str]]] = zip(tags[0::2], [l.split("\n") for l in tags[1::2]])

    atoms = ["N", "CA", "C"]
    aatype = None
    atom_positions = None
    atom_mask = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            seq = g[1][0].strip()
            for i in range(len(seq)):
                if seq[i] not in residue_constants.restypes:
                    seq[i] = "X"  # FIXME: strings are immutable
            aatype = np.array(
                [residue_constants.restype_order.get(res_symbol, residue_constants.restype_num) for res_symbol in seq]
            )
        elif "[TERTIARY]" == g[0]:
            tertiary: List[List[float]] = []
            for axis in range(3):
                tertiary.append(list(map(float, g[1][axis].split())))
            tertiary_np = np.array(tertiary)
            atom_positions = np.zeros((len(tertiary[0]) // 3, residue_constants.atom_type_num, 3)).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_positions[:, residue_constants.atom_order[atom], :] = np.transpose(tertiary_np[:, i::3])
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            mask = np.array(list(map({"-": 0, "+": 1}.get, g[1][0].strip())))
            atom_mask = np.zeros(
                (
                    len(mask),
                    residue_constants.atom_type_num,
                )
            ).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_mask[:, residue_constants.atom_order[atom]] = 1
            atom_mask *= mask[..., None]

    assert aatype is not None

    return Protein(
        atom_positions=atom_positions,
        atom_mask=atom_mask,
        aatype=aatype,
        residue_index=np.arange(len(aatype)),
        b_factors=None,
    )
def get_pdb_headers(prot: Protein, chain_id: int = 0) -> List[str]:
    pdb_headers: List[str] = []

    remark = prot.remark
    if remark is not None:
        pdb_headers.append(f"REMARK {remark}")

    parents = prot.parents
    parents_chain_index = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        parents = [p for i, p in zip(parents_chain_index, parents) if i == chain_id]

    if parents is None or len(parents) == 0:
        parents = ["N/A"]

    pdb_headers.append(f"PARENT {' '.join(parents)}")

    return pdb_headers
def add_pdb_headers(prot: Protein, pdb_str: str) -> str:
    """Add pdb headers to an existing PDB string. Useful during multi-chain
    recycling or when a prediction is split across several files."""
    out_pdb_lines: List[str] = []
    lines = pdb_str.split("\n")

    remark = prot.remark
    if remark is not None:
        out_pdb_lines.append(f"REMARK {remark}")

    parents_per_chain: List[List[str]]
    if prot.parents is not None and len(prot.parents) > 0:
        parents_per_chain = []
        if prot.parents_chain_index is not None:
            parent_dict: Dict[str, List[str]] = {}
            for p, i in zip(prot.parents, prot.parents_chain_index):
                parent_dict.setdefault(str(i), [])
                parent_dict[str(i)].append(p)

            max_idx = max([int(chain_idx) for chain_idx in parent_dict])
            for i in range(max_idx + 1):
                chain_parents = parent_dict.get(str(i), ["N/A"])
                parents_per_chain.append(chain_parents)
        else:
            parents_per_chain.append(list(prot.parents))
    else:
        parents_per_chain = [["N/A"]]

    def make_parent_line(p: Sequence[str]) -> str:
        return f"PARENT {' '.join(p)}"

    out_pdb_lines.append(make_parent_line(parents_per_chain[0]))

    chain_counter = 0
    for i, l in enumerate(lines):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(l)
        if "TER" in l and "END" not in lines[i + 1]:
            chain_counter += 1
            if not chain_counter >= len(parents_per_chain):
                chain_parents = parents_per_chain[chain_counter]
            else:
                chain_parents = ["N/A"]

            out_pdb_lines.append(make_parent_line(chain_parents))

    return "\n".join(out_pdb_lines)
def to_pdb(prot: Protein) -> str:
    """Converts a `Protein` instance to a PDB string."""
    restypes = residue_constants.restypes + ["X"]

    def res_1to3(r: int) -> str:
        return residue_constants.restype_1to3.get(restypes[r], "UNK")

    atom_types = residue_constants.atom_types

    pdb_lines: List[str] = []

    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.int32)
    b_factors = prot.b_factors
    chain_index = prot.chain_index

    if np.any(aatype > residue_constants.restype_num):
        raise ValueError("Invalid aatypes.")

    headers = get_pdb_headers(prot)
    if len(headers) > 0:
        pdb_lines.extend(headers)

    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    chain_tag = None
    # Add all atom sites.
    for i in range(n):
        res_name_3 = res_1to3(aatype[i])
        for atom_name, pos, mask, b_factor in zip(atom_types, atom_positions[i], atom_mask[i], b_factors[i]):
            if mask < 0.5:
                continue

            record_type = "ATOM"
            name = atom_name if len(atom_name) == 4 else f" {atom_name}"
            alt_loc = ""
            insertion_code = ""
            occupancy = 1.00
            element = atom_name[0]  # Protein supports only C, N, O, S, this works.
            charge = ""

            chain_tag = "A"
            if chain_index is not None:
                chain_tag = chain_tags[chain_index[i]]

            # PDB is a columnar format, every space matters here!
            atom_line = (
                f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
                f"{res_name_3:>3} {chain_tag:>1}"
                f"{residue_index[i]:>4}{insertion_code:>1}   "
                f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
                f"{occupancy:>6.2f}{b_factor:>6.2f}          "
                f"{element:>2}{charge:>2}"
            )
            pdb_lines.append(atom_line)
            atom_index += 1

        should_terminate = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                should_terminate = True
                prev_chain_index = chain_index[i + 1]

        if should_terminate:
            # Close the chain.
            chain_end = "TER"
            chain_termination_line = (
                f"{chain_end:<6}{atom_index:>5}      {res_1to3(aatype[i]):>3} {chain_tag:>1}{residue_index[i]:>4}"
            )
            pdb_lines.append(chain_termination_line)
            atom_index += 1

            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(prot, prev_chain_index))

    pdb_lines.append("END")
    pdb_lines.append("")
    return "\n".join(pdb_lines)
def ideal_atom_mask(prot: Protein) -> np.ndarray:
    """Computes an ideal atom mask from the residue types alone."""
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def from_prediction(
    features: FeatureDict,
    result: ModelOutput,
    b_factors: Optional[np.ndarray] = None,
    chain_index: Optional[np.ndarray] = None,
    remark: Optional[str] = None,
    parents: Optional[Sequence[str]] = None,
    parents_chain_index: Optional[Sequence[int]] = None,
) -> Protein:
    return Protein(
        aatype=features["aatype"],
        atom_positions=result["final_atom_positions"],
        atom_mask=result["final_atom_mask"],
        residue_index=features["residue_index"] + 1,
        b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"]),
        chain_index=chain_index,
        remark=remark,
        parents=parents,
        parents_chain_index=parents_chain_index,
    )
| 10 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
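# --- Illustrative sketch (not part of the original file) ---
# Minimal end-to-end use of the processor exercised above; the checkpoint is
# the public CLIP model and the image is synthetic.
#
#   from transformers import CLIPProcessor
#   processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
#   inputs = processor(text=["a photo"], images=image, return_tensors="pt")
#   print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']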
| 366 |
from __future__ import annotations
graph = {
"""A""": ["""B""", """C""", """E"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F""", """G"""],
"""D""": ["""B"""],
"""E""": ["""A""", """B""", """D"""],
"""F""": ["""C"""],
"""G""": ["""C"""],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        """Graph is implemented as dictionary of adjacency lists. Also,
        the source vertex has to be defined upon initialization."""
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        """This function is a helper for running breadth first search on this graph."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Returns the shortest path between the source and the target vertex
        as a string of the form `v1->v2->...->vn`, or raises ValueError when
        no path exists."""
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = (
                f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            )
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"
if __name__ == "__main__":
__lowerCamelCase = Graph(graph, """G""")
g.breath_first_search()
print(g.shortest_path("""D"""))
print(g.shortest_path("""G"""))
print(g.shortest_path("""Foo"""))
| 10 | 0 |
import random
def rabin_miller(num: int) -> bool:
    s = num - 1
    t = 0

    while s % 2 == 0:
        s = s // 2
        t += 1

    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
def is_prime_low_num(num: int) -> bool:
    if num < 2:
        return False

    low_primes = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
101,
103,
107,
109,
113,
127,
131,
137,
139,
149,
151,
157,
163,
167,
173,
179,
181,
191,
193,
197,
199,
211,
223,
227,
229,
233,
239,
241,
251,
257,
263,
269,
271,
277,
281,
283,
293,
307,
311,
313,
317,
331,
337,
347,
349,
353,
359,
367,
373,
379,
383,
389,
397,
401,
409,
419,
421,
431,
433,
439,
443,
449,
457,
461,
463,
467,
479,
487,
491,
499,
503,
509,
521,
523,
541,
547,
557,
563,
569,
571,
577,
587,
593,
599,
601,
607,
613,
617,
619,
631,
641,
643,
647,
653,
659,
661,
673,
677,
683,
691,
701,
709,
719,
727,
733,
739,
743,
751,
757,
761,
769,
773,
787,
797,
809,
811,
821,
823,
827,
829,
839,
853,
857,
859,
863,
877,
881,
883,
887,
907,
911,
919,
929,
937,
941,
947,
953,
967,
971,
977,
983,
991,
997,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
    return rabin_miller(num)
def generate_large_prime(keysize: int = 1024) -> int:
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num
if __name__ == "__main__":
    num = generate_large_prime()
print(("""Prime number:""", num))
print(("""is_prime_low_num:""", is_prime_low_num(num)))
| 367 |
from __future__ import annotations
def UpperCamelCase ( __lowerCamelCase : list[int] ):
snake_case : Optional[int] = len(__lowerCamelCase ) // 2
# choose the middle 3 elements
snake_case : str = lst[m - 1 : m + 2]
# if middle element is peak
if three[1] > three[0] and three[1] > three[2]:
return three[1]
# if increasing, recurse on right
elif three[0] < three[2]:
if len(lst[:m] ) == 2:
m -= 1
return peak(lst[m:] )
# decreasing
else:
if len(lst[:m] ) == 2:
m += 1
return peak(lst[:m] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 10 | 0 |
import math
def sieve(n):
    """Segmented sieve of Eratosthenes; returns all primes up to n."""
    in_prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    prime = []

    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each

            for j in range(t, high + 1, each):
                temp[j - low] = False

        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)

        low = high + 1
        high = min(high + end, n)

    return prime
print(sieve(10**6))
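# --- Illustrative sketch (not part of the original file) ---
# Cross-check of the segmented sieve against a plain sieve on a small bound.
# The helper name is hypothetical.
def _plain_sieve(n: int) -> list:
    flags = [True] * (n + 1)
    flags[0], flags[1] = False, False
    for p in range(2, int(n**0.5) + 1):
        if flags[p]:
            for multiple in range(p * p, n + 1, p):
                flags[multiple] = False
    return [p for p, is_p in enumerate(flags) if is_p]

# assert sieve(10_000) == _plain_sieve(10_000)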
| 368 |
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
__lowerCamelCase = """."""
if __name__ == "__main__":
__lowerCamelCase = os.path.join(REPO_PATH, """utils/documentation_tests.txt""")
__lowerCamelCase = []
__lowerCamelCase = []
with open(doctest_file_path) as fp:
for line in fp:
__lowerCamelCase = line.strip()
__lowerCamelCase = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
__lowerCamelCase = """\n""".join(non_existent_paths)
raise ValueError(F'`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}')
if all_paths != sorted(all_paths):
raise ValueError("""Files in `utils/documentation_tests.txt` are not in alphabetical order.""")
| 10 | 0 |
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer ( BaseTokenizer ):
    """Custom SentencePiece Unigram tokenizer built on the `tokenizers` library."""

    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: Union[str, AddedToken] = "<unk>",
        eos_token: Union[str, AddedToken] = "</s>",
        pad_token: Union[str, AddedToken] = "<pad>",
    ):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }

        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]

        tokenizer = Tokenizer(Unigram())

        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)

        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )

        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }

        super().__init__(tokenizer, parameters)

    def train(
        self,
        files: Union[str, List[str]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the model using the given files."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)

        self.add_unk_id()

    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[Iterator[str]]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the model using the given iterator."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        self._tokenizer.train_from_iterator(iterator, trainer=trainer)

        self.add_unk_id()

    def add_unk_id(self):
        tokenizer_json = json.loads(self._tokenizer.to_str())

        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]

        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
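# --- Illustrative sketch (not part of the original file) ---
# Training the tokenizer above on a tiny in-memory corpus; the sentences and
# vocab size are made up for illustration.
#
#   tokenizer = SentencePieceUnigramTokenizer()
#   tokenizer.train_from_iterator(["hello world", "hello tokenizers"], vocab_size=30)
#   print(tokenizer.encode("hello world").tokens)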
| 369 |
import fire
from utils import calculate_rouge, save_json
def UpperCamelCase ( __lowerCamelCase : Dict , __lowerCamelCase : Dict , __lowerCamelCase : Tuple=None , **__lowerCamelCase : Tuple ):
snake_case : Optional[Any] = [x.strip() for x in open(__lowerCamelCase ).readlines()]
snake_case : Union[str, Any] = [x.strip() for x in open(__lowerCamelCase ).readlines()][: len(__lowerCamelCase )]
snake_case : List[Any] = calculate_rouge(__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase )
if save_path is not None:
save_json(__lowerCamelCase , __lowerCamelCase , indent=__lowerCamelCase )
return metrics # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
| 10 | 0 |
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    @cached_property
    def big_tokenizer(self):
        return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [126, 32, 262, 152, 38, 72, 287]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
1_08,
2_65,
24,
1_11,
4,
2_58,
1_56,
35,
28,
2_75,
3,
2_59,
2_97,
2_60,
84,
4,
35,
1_10,
44,
8,
2_59,
91,
2_68,
21,
11,
2_09,
2_74,
1_09,
2_66,
2_77,
1_17,
86,
93,
3_15,
2_58,
2_78,
2_58,
2_77,
2_58,
0,
2_58,
2_88,
2_58,
3_19,
2_58,
0,
2_58,
0,
2_58,
0,
2_58,
0,
2_58,
2_87,
2_58,
3_15,
2_58,
2_89,
2_58,
2_78,
99,
2_69,
2_66,
2_62,
8,
2_59,
2_41,
4,
2_17,
2_30,
2_68,
2_66,
55,
1_68,
1_06,
75,
1_93,
2_66,
2_23,
27,
49,
26,
2_82,
25,
2_64,
2_99,
19,
26,
0,
2_58,
2_77,
1_17,
86,
93,
1_76,
1_83,
2_70,
11,
2_62,
42,
61,
2_65,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import ReformerConfig, ReformerModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt")
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([sequence, sequence], return_tensors="pt")

        config = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        config.axial_pos_shape = encoded_sequence["input_ids"].shape
        model = ReformerModel(config)

        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # This tokenizer does not know some characters like ")".
        # That is the reason why we use very simple texts here.
        # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        sequences = [
            "This is a very simple sentence.",
            "The quick brown fox jumps over the lazy dog.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/reformer-crime-and-punishment",
            revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a",
            padding=False,
            sequences=sequences,
        )
| 370 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""encoder.layer_norm_for_extract""": """layer_norm_for_extract""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""label_embs_concat""": """label_embeddings_concat""",
"""mask_emb""": """masked_spec_embed""",
"""spk_proj""": """speaker_proj""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""label_embeddings_concat""",
"""speaker_proj""",
"""layer_norm_for_extract""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def UpperCamelCase ( __lowerCamelCase : int , __lowerCamelCase : List[str] ):
snake_case : int = []
snake_case : List[Any] = fairseq_model.state_dict()
snake_case : int = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
snake_case : List[str] = False
if "conv_layers" in name:
load_conv_layer(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , hf_model.config.feat_extract_norm == "group" , )
snake_case : str = True
else:
for key, mapped_key in MAPPING.items():
snake_case : Tuple = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split("." )[:-1] ) != key):
# special case since naming is very similar
continue
snake_case : Tuple = True
if "*" in mapped_key:
snake_case : Union[str, Any] = name.split(__lowerCamelCase )[0].split("." )[-2]
snake_case : Any = mapped_key.replace("*" , __lowerCamelCase )
if "weight_g" in name:
snake_case : Optional[int] = "weight_g"
elif "weight_v" in name:
snake_case : Tuple = "weight_v"
elif "bias" in name:
snake_case : Dict = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
snake_case : str = "weight"
else:
snake_case : str = None
set_recursively(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
continue
if not is_used:
unused_weights.append(__lowerCamelCase )
logger.warning(f"""Unused weights: {unused_weights}""" )
def UpperCamelCase ( __lowerCamelCase : Any , __lowerCamelCase : Any , __lowerCamelCase : Tuple , __lowerCamelCase : List[str] , __lowerCamelCase : Any ):
snake_case : str = full_name.split("conv_layers." )[-1]
snake_case : int = name.split("." )
snake_case : Optional[int] = int(items[0] )
snake_case : Dict = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
snake_case : Union[str, Any] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
snake_case : List[str] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" )
snake_case : Dict = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" )
snake_case : Optional[Any] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__lowerCamelCase )
@torch.no_grad()
def UpperCamelCase ( __lowerCamelCase : Tuple , __lowerCamelCase : Dict , __lowerCamelCase : List[Any]=None , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Dict=True ):
if config_path is not None:
snake_case : str = UniSpeechSatConfig.from_pretrained(__lowerCamelCase )
else:
snake_case : str = UniSpeechSatConfig()
snake_case : Tuple = ""
if is_finetuned:
snake_case : Tuple = UniSpeechSatForCTC(__lowerCamelCase )
else:
snake_case : List[Any] = UniSpeechSatForPreTraining(__lowerCamelCase )
snake_case , snake_case , snake_case : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
snake_case : Dict = model[0].eval()
recursively_load_weights(__lowerCamelCase , __lowerCamelCase )
hf_wavavec.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
__lowerCamelCase = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
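# Example invocation (editorial sketch; the script name and all paths are placeholders,
# the flags match the argparse definitions above):
# python convert_unispeech_sat_checkpoint.py \
#     --checkpoint_path /path/to/unispeech_sat.pt \
#     --dict_path /path/to/dict.ltr.txt \
#     --pytorch_dump_folder_path ./unispeech-sat-hf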
| 10 | 0 |
from math import factorial
class UpperCAmelCase :
def __init__(self : int , snake_case__ : Any , snake_case__ : Optional[int] ) -> Tuple:
'''simple docstring'''
snake_case : Union[str, Any] = real
if isinstance(snake_case__ , snake_case__ ):
snake_case : Any = [1] * rank
else:
snake_case : Dict = rank
def __repr__(self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
return (
f"""{self.real}+"""
f"""{'+'.join(str(snake_case__ )+'E'+str(n+1 )for n,dual in enumerate(self.duals ) )}"""
)
def _SCREAMING_SNAKE_CASE (self : Dict ) -> List[str]:
'''simple docstring'''
snake_case : List[str] = self.duals.copy()
while cur[-1] == 0:
cur.pop(-1 )
return Dual(self.real , snake_case__ )
def __add__(self : Any , snake_case__ : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
if not isinstance(snake_case__ , snake_case__ ):
return Dual(self.real + other , self.duals )
snake_case : int = self.duals.copy()
snake_case : List[str] = other.duals.copy()
if len(snake_case__ ) > len(snake_case__ ):
o_dual.extend([1] * (len(snake_case__ ) - len(snake_case__ )) )
elif len(snake_case__ ) < len(snake_case__ ):
s_dual.extend([1] * (len(snake_case__ ) - len(snake_case__ )) )
snake_case : Tuple = []
for i in range(len(snake_case__ ) ):
new_duals.append(s_dual[i] + o_dual[i] )
return Dual(self.real + other.real , snake_case__ )
A__ : Any = __add__
def __sub__(self : str , snake_case__ : Any ) -> List[str]:
'''simple docstring'''
return self + other * -1
def __mul__(self : str , snake_case__ : Union[str, Any] ) -> int:
'''simple docstring'''
if not isinstance(snake_case__ , snake_case__ ):
snake_case : Optional[int] = []
for i in self.duals:
new_duals.append(i * other )
return Dual(self.real * other , snake_case__ )
snake_case : List[str] = [0] * (len(self.duals ) + len(other.duals ) + 1)
for i, item in enumerate(self.duals ):
for j, jtem in enumerate(other.duals ):
new_duals[i + j + 1] += item * jtem
for k in range(len(self.duals ) ):
new_duals[k] += self.duals[k] * other.real
for index in range(len(other.duals ) ):
new_duals[index] += other.duals[index] * self.real
return Dual(self.real * other.real , snake_case__ )
A__ : Dict = __mul__
def __truediv__(self : str , snake_case__ : Any ) -> str:
'''simple docstring'''
if not isinstance(snake_case__ , snake_case__ ):
snake_case : Any = []
for i in self.duals:
new_duals.append(i / other )
return Dual(self.real / other , snake_case__ )
raise ValueError
def __floordiv__(self : Any , snake_case__ : Union[str, Any] ) -> Dict:
'''simple docstring'''
if not isinstance(snake_case__ , snake_case__ ):
snake_case : Any = []
for i in self.duals:
new_duals.append(i // other )
return Dual(self.real // other , snake_case__ )
raise ValueError
def __pow__(self : str , snake_case__ : str ) -> int:
'''simple docstring'''
if n < 0 or isinstance(snake_case__ , snake_case__ ):
raise ValueError("power must be a positive integer" )
if n == 0:
return 1
if n == 1:
return self
snake_case : int = self
for _ in range(n - 1 ):
x *= self
return x
def UpperCamelCase ( __lowerCamelCase : List[str] , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any] ):
if not callable(lowerCamelCase__ ):
raise ValueError("differentiate() requires a function as input for func" )
if not isinstance(lowerCamelCase__ , (float, int) ):
raise ValueError("differentiate() requires a float as input for position" )
if not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
raise ValueError("differentiate() requires an int as input for order" )
snake_case : Optional[Any] = Dual(lowerCamelCase__ , 1 )
snake_case : str = func(lowerCamelCase__ )
if order == 0:
return result.real
return result.duals[order - 1] * factorial(lowerCamelCase__ )
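# Worked example (editorial sketch): for f(x) = x**3, differentiate(f, 2, 1)
# builds Dual(2, 1), i.e. real=2 with duals=[1]; multiplying it out yields
# Dual(8, [12, 6, 1, 0, 0]), so the first derivative is duals[0] * 1! = 12
# (= 3 * 2**2) and the second derivative is duals[1] * 2! = 12 (= 6 * 2).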
if __name__ == "__main__":
import doctest
doctest.testmod()
def UpperCamelCase ( __lowerCamelCase : Optional[int] ):
return y**2 * y**4
print(differentiate(f, 9, 2))
| 371 |
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = """▁"""
__lowerCamelCase = {"""vocab_file""": """prophetnet.tokenizer"""}
__lowerCamelCase = {
"""vocab_file""": {
"""microsoft/xprophetnet-large-wiki100-cased""": (
"""https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"""
),
}
}
__lowerCamelCase = {
"""microsoft/xprophetnet-large-wiki100-cased""": {"""do_lower_case""": False},
}
__lowerCamelCase = {
"""microsoft/xprophetnet-large-wiki100-cased""": 5_12,
}
def UpperCamelCase ( __lowerCamelCase : Dict ):
snake_case : Dict = collections.OrderedDict()
with open(__lowerCamelCase , "r" , encoding="utf-8" ) as reader:
snake_case : Any = reader.readlines()
for index, token in enumerate(__lowerCamelCase ):
snake_case : List[Any] = token.rstrip("\n" )
snake_case : int = index
return vocab
class UpperCAmelCase ( A_ ):
A__ : Tuple = VOCAB_FILES_NAMES
A__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
A__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : int = ["input_ids", "attention_mask"]
def __init__(self : Any , snake_case__ : Dict , snake_case__ : List[Any]="[SEP]" , snake_case__ : Optional[int]="[SEP]" , snake_case__ : Union[str, Any]="[SEP]" , snake_case__ : List[Any]="[UNK]" , snake_case__ : List[str]="[PAD]" , snake_case__ : List[str]="[CLS]" , snake_case__ : List[Any]="[MASK]" , snake_case__ : Optional[Dict[str, Any]] = None , **snake_case__ : List[str] , ) -> None:
'''simple docstring'''
snake_case : Dict = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , cls_token=snake_case__ , mask_token=snake_case__ , sp_model_kwargs=self.sp_model_kwargs , **snake_case__ , )
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece" )
raise
snake_case : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(snake_case__ ) )
snake_case : Dict = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
snake_case : List[Any] = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
for i in range(10 ):
snake_case : Dict = f"""[unused{i}]"""
snake_case : List[str] = 5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
snake_case : Dict = 12
snake_case : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(snake_case__ )
def __getstate__(self : str ) -> Union[str, Any]:
'''simple docstring'''
snake_case : str = self.__dict__.copy()
snake_case : Tuple = None
return state
def __setstate__(self : str , snake_case__ : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
snake_case : Union[str, Any] = d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece" )
raise
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
snake_case : Dict = {}
snake_case : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
if token_ids_a is None:
return ([0] * len(snake_case__ )) + [1]
return ([0] * len(snake_case__ )) + [1] + ([0] * len(snake_case__ )) + [1]
def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
snake_case : List[str] = [self.sep_token_id]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0]
return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _SCREAMING_SNAKE_CASE (self : Any ) -> int:
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset
def _SCREAMING_SNAKE_CASE (self : int ) -> Any:
'''simple docstring'''
snake_case : List[str] = {self.convert_ids_to_tokens(snake_case__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : str ) -> str:
'''simple docstring'''
return self.sp_model.encode(snake_case__ , out_type=snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : Optional[int] ) -> Any:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
snake_case : Optional[Any] = self.sp_model.PieceToId(snake_case__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : Optional[int] ) -> int:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] , snake_case__ : Dict ) -> List[Any]:
'''simple docstring'''
snake_case : Dict = "".join(snake_case__ ).replace(snake_case__ , " " ).strip()
return out_string
def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : str , snake_case__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(snake_case__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case : Dict = os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case__ )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case__ , "wb" ) as fi:
snake_case : Tuple = self.sp_model.serialized_model_proto()
fi.write(snake_case__ )
return (out_vocab_file,)
def _SCREAMING_SNAKE_CASE (self : Any , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.sep_token_id]
snake_case : str = [self.sep_token_id]
return token_ids_a + sep + token_ids_a + sep
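# Editorial usage sketch (the public class name in transformers is
# XLMProphetNetTokenizer; the token ids below are illustrative, and the [SEP]
# id is 2 per the fairseq map built in __init__):
# tokenizer = XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")
# tokenizer.build_inputs_with_special_tokens([20, 21])        # -> [20, 21, 2]
# tokenizer.build_inputs_with_special_tokens([20, 21], [22])  # -> [20, 21, 2, 22, 2]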
| 10 | 0 |
from __future__ import annotations
__lowerCamelCase = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def UpperCamelCase ( __lowerCamelCase : list[list[int]] , __lowerCamelCase : list[int] , __lowerCamelCase : list[int] , __lowerCamelCase : int , __lowerCamelCase : list[list[int]] , ):
snake_case : int = [
[0 for col in range(len(grid[0] ) )] for row in range(len(_SCREAMING_SNAKE_CASE ) )
] # the reference grid
snake_case : Tuple = 1
snake_case : str = [
[0 for col in range(len(grid[0] ) )] for row in range(len(_SCREAMING_SNAKE_CASE ) )
] # the action grid
snake_case : Tuple = init[0]
snake_case : List[Any] = init[1]
snake_case : Optional[Any] = 0
    snake_case : Optional[Any] = g + heuristic[x][y] # estimated total cost: path cost so far plus heuristic to the goal
snake_case : List[str] = [[f, g, x, y]]
snake_case : Tuple = False # flag that is set when search is complete
    snake_case : Union[str, Any] = False # flag set if we cannot expand any further
while not found and not resign:
if len(_SCREAMING_SNAKE_CASE ) == 0:
raise ValueError("Algorithm is unable to find solution" )
        else: # choose the least costly action so as to move closer to the goal
cell.sort()
cell.reverse()
snake_case : Dict = cell.pop()
snake_case : Tuple = next_cell[2]
snake_case : str = next_cell[3]
snake_case : List[Any] = next_cell[1]
if x == goal[0] and y == goal[1]:
snake_case : Optional[Any] = True
else:
for i in range(len(_SCREAMING_SNAKE_CASE ) ): # to try out different valid actions
snake_case : Union[str, Any] = x + DIRECTIONS[i][0]
snake_case : Optional[Any] = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(_SCREAMING_SNAKE_CASE ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
snake_case : Any = g + cost
snake_case : Optional[Any] = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
snake_case : List[Any] = 1
snake_case : List[str] = i
snake_case : Union[str, Any] = []
snake_case : List[Any] = goal[0]
snake_case : str = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
snake_case : Optional[int] = x - DIRECTIONS[action[x][y]][0]
snake_case : Optional[int] = y - DIRECTIONS[action[x][y]][1]
snake_case : Optional[Any] = xa
snake_case : List[str] = ya
invpath.append([x, y] )
snake_case : Tuple = []
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
path.append(invpath[len(_SCREAMING_SNAKE_CASE ) - 1 - i] )
return path, action
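# Editorial note (sketch): each open-list entry above is [f, g, x, y] with
# f = g + heuristic[x][y]; the list is sorted ascending and reversed before
# pop(), so the cell with the smallest estimated total cost f is expanded first.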
if __name__ == "__main__":
__lowerCamelCase = [
[0, 1, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0], # 0s are free cells whereas 1s are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
__lowerCamelCase = [0, 0]
# all coordinates are given in format [y,x]
__lowerCamelCase = [len(grid) - 1, len(grid[0]) - 1]
__lowerCamelCase = 1
# the cost map which pushes the path closer to the goal
__lowerCamelCase = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
__lowerCamelCase = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
__lowerCamelCase = 99
__lowerCamelCase, __lowerCamelCase = search(grid, init, goal, cost, heuristic)
print("""ACTION MAP""")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 350 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = """▁"""
__lowerCamelCase = {"""vocab_file""": """sentencepiece.bpe.model"""}
__lowerCamelCase = {
"""vocab_file""": {
"""facebook/xglm-564M""": """https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model""",
}
}
__lowerCamelCase = {
"""facebook/xglm-564M""": 20_48,
}
class UpperCAmelCase ( A_ ):
A__ : Any = VOCAB_FILES_NAMES
A__ : Tuple = PRETRAINED_VOCAB_FILES_MAP
A__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : Optional[Any] = ["input_ids", "attention_mask"]
def __init__(self : str , snake_case__ : Optional[Any] , snake_case__ : List[str]="<s>" , snake_case__ : Tuple="</s>" , snake_case__ : Dict="</s>" , snake_case__ : Any="<s>" , snake_case__ : str="<unk>" , snake_case__ : str="<pad>" , snake_case__ : Optional[Dict[str, Any]] = None , **snake_case__ : Any , ) -> None:
'''simple docstring'''
snake_case : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
snake_case : Optional[int] = 7
snake_case : List[str] = [f"""<madeupword{i}>""" for i in range(self.num_madeup_words )]
snake_case : Union[str, Any] = kwargs.get("additional_special_tokens" , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , pad_token=snake_case__ , sp_model_kwargs=self.sp_model_kwargs , **snake_case__ , )
snake_case : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(snake_case__ ) )
snake_case : str = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
snake_case : int = 1
# Mimic fairseq token-to-id alignment for the first 4 token
snake_case : Any = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
snake_case : Tuple = len(self.sp_model )
snake_case : Any = {f"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(snake_case__ )
snake_case : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__(self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
snake_case : Union[str, Any] = self.__dict__.copy()
snake_case : str = None
snake_case : Union[str, Any] = self.sp_model.serialized_model_proto()
return state
def __setstate__(self : Dict , snake_case__ : Optional[Any] ) -> List[str]:
'''simple docstring'''
snake_case : int = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
snake_case : List[str] = {}
snake_case : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
snake_case : Tuple = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
if token_ids_a is None:
return [1] + ([0] * len(snake_case__ ))
return [1] + ([0] * len(snake_case__ )) + [1, 1] + ([0] * len(snake_case__ ))
def _SCREAMING_SNAKE_CASE (self : List[Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
snake_case : List[str] = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> List[Any]:
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def _SCREAMING_SNAKE_CASE (self : int ) -> Tuple:
'''simple docstring'''
snake_case : List[str] = {self.convert_ids_to_tokens(snake_case__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _SCREAMING_SNAKE_CASE (self : List[str] , snake_case__ : str ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(snake_case__ , out_type=snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
snake_case : List[Any] = self.sp_model.PieceToId(snake_case__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _SCREAMING_SNAKE_CASE (self : Optional[Any] , snake_case__ : str ) -> int:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : Tuple ) -> int:
'''simple docstring'''
snake_case : List[Any] = "".join(snake_case__ ).replace(snake_case__ , " " ).strip()
return out_string
def _SCREAMING_SNAKE_CASE (self : List[str] , snake_case__ : str , snake_case__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(snake_case__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case : Optional[Any] = os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case__ )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case__ , "wb" ) as fi:
snake_case : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(snake_case__ )
return (out_vocab_file,)
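# Editorial note (sketch): with fairseq_offset = 1 and the four reserved ids
# above ("<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3), a sentencepiece piece
# with spm id 3 (",") maps to fairseq id 4, matching the alignment table in
# __init__; the <madeupword{i}> tokens are appended after the full
# sentencepiece vocabulary.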
| 10 | 0 |
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
__lowerCamelCase = logging.get_logger(__name__)
class UpperCAmelCase ( A_ ):
def __init__(self : int , *snake_case__ : Any , **snake_case__ : Union[str, Any] ) -> None:
'''simple docstring'''
warnings.warn(
"The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use DeiTImageProcessor instead." , snake_case__ , )
super().__init__(*snake_case__ , **snake_case__ )
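# Usage note (editorial sketch): instantiating the deprecated feature extractor
# still works but emits a FutureWarning; new code should construct
# DeiTImageProcessor directly with the same keyword arguments.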
| 351 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
__lowerCamelCase = logging.get_logger(__name__)
class UpperCAmelCase ( A_ ):
A__ : int = ["pixel_values"]
def __init__(self : Tuple , snake_case__ : bool = True , snake_case__ : Union[int, float] = 1 / 2_55 , snake_case__ : bool = True , snake_case__ : int = 8 , **snake_case__ : Dict , ) -> None:
'''simple docstring'''
super().__init__(**snake_case__ )
snake_case : int = do_rescale
snake_case : List[str] = rescale_factor
snake_case : Optional[Any] = do_pad
snake_case : Dict = pad_size
def _SCREAMING_SNAKE_CASE (self : Dict , snake_case__ : np.ndarray , snake_case__ : float , snake_case__ : Optional[Union[str, ChannelDimension]] = None , **snake_case__ : List[str] ) -> np.ndarray:
'''simple docstring'''
return rescale(snake_case__ , scale=snake_case__ , data_format=snake_case__ , **snake_case__ )
def _SCREAMING_SNAKE_CASE (self : List[Any] , snake_case__ : np.ndarray , snake_case__ : int , snake_case__ : Optional[Union[str, ChannelDimension]] = None ) -> Dict:
'''simple docstring'''
snake_case , snake_case : Union[str, Any] = get_image_size(snake_case__ )
snake_case : str = (old_height // size + 1) * size - old_height
snake_case : List[str] = (old_width // size + 1) * size - old_width
return pad(snake_case__ , ((0, pad_height), (0, pad_width)) , mode="symmetric" , data_format=snake_case__ )
def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : ImageInput , snake_case__ : Optional[bool] = None , snake_case__ : Optional[float] = None , snake_case__ : Optional[bool] = None , snake_case__ : Optional[int] = None , snake_case__ : Optional[Union[str, TensorType]] = None , snake_case__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **snake_case__ : List[Any] , ) -> Tuple:
'''simple docstring'''
snake_case : str = do_rescale if do_rescale is not None else self.do_rescale
snake_case : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case : Optional[Any] = do_pad if do_pad is not None else self.do_pad
snake_case : Dict = pad_size if pad_size is not None else self.pad_size
snake_case : Union[str, Any] = make_list_of_images(snake_case__ )
if not valid_images(snake_case__ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
# All transformations expect numpy arrays.
snake_case : str = [to_numpy_array(snake_case__ ) for image in images]
if do_rescale:
snake_case : str = [self.rescale(image=snake_case__ , scale=snake_case__ ) for image in images]
if do_pad:
snake_case : List[Any] = [self.pad(snake_case__ , size=snake_case__ ) for image in images]
snake_case : Union[str, Any] = [to_channel_dimension_format(snake_case__ , snake_case__ ) for image in images]
snake_case : Optional[Any] = {"pixel_values": images}
return BatchFeature(data=snake_case__ , tensor_type=snake_case__ )
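# Worked example (editorial sketch) of the symmetric padding rule above: with
# pad_size=8, a 257x300 image is padded on the bottom/right to 264x304, i.e. up
# to the next multiple of 8; note that a full extra block of 8 is added when a
# side is already an exact multiple.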
| 10 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
'microsoft/beit-base-patch16-224-pt22k': (
'https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class UpperCAmelCase ( _lowerCamelCase ):
A__ : Optional[Any] = "beit"
def __init__(self : str , snake_case__ : Tuple=81_92 , snake_case__ : Tuple=7_68 , snake_case__ : Tuple=12 , snake_case__ : Optional[int]=12 , snake_case__ : Optional[Any]=30_72 , snake_case__ : Optional[Any]="gelu" , snake_case__ : str=0.0 , snake_case__ : List[Any]=0.0 , snake_case__ : Union[str, Any]=0.02 , snake_case__ : int=1e-12 , snake_case__ : Any=2_24 , snake_case__ : Dict=16 , snake_case__ : List[Any]=3 , snake_case__ : Tuple=False , snake_case__ : Tuple=False , snake_case__ : Optional[Any]=False , snake_case__ : Optional[int]=False , snake_case__ : Optional[Any]=0.1 , snake_case__ : Tuple=0.1 , snake_case__ : List[Any]=True , snake_case__ : List[str]=[3, 5, 7, 11] , snake_case__ : Optional[Any]=[1, 2, 3, 6] , snake_case__ : Union[str, Any]=True , snake_case__ : Optional[int]=0.4 , snake_case__ : Tuple=2_56 , snake_case__ : List[str]=1 , snake_case__ : Union[str, Any]=False , snake_case__ : int=2_55 , **snake_case__ : Tuple , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(**snake_case__ )
snake_case : Optional[int] = vocab_size
snake_case : Optional[int] = hidden_size
snake_case : List[Any] = num_hidden_layers
snake_case : Optional[Any] = num_attention_heads
snake_case : str = intermediate_size
snake_case : str = hidden_act
snake_case : str = hidden_dropout_prob
snake_case : Tuple = attention_probs_dropout_prob
snake_case : int = initializer_range
snake_case : Optional[Any] = layer_norm_eps
snake_case : Optional[int] = image_size
snake_case : Tuple = patch_size
snake_case : List[str] = num_channels
snake_case : Any = use_mask_token
snake_case : List[Any] = use_absolute_position_embeddings
snake_case : Optional[Any] = use_relative_position_bias
snake_case : List[str] = use_shared_relative_position_bias
snake_case : Tuple = layer_scale_init_value
snake_case : Any = drop_path_rate
snake_case : List[str] = use_mean_pooling
# decode head attributes (semantic segmentation)
snake_case : Any = out_indices
snake_case : int = pool_scales
# auxiliary head attributes (semantic segmentation)
snake_case : Optional[int] = use_auxiliary_head
snake_case : Optional[Any] = auxiliary_loss_weight
snake_case : Union[str, Any] = auxiliary_channels
snake_case : Optional[Any] = auxiliary_num_convs
snake_case : Any = auxiliary_concat_input
snake_case : int = semantic_loss_ignore_index
class UpperCAmelCase ( _lowerCamelCase ):
A__ : Union[str, Any] = version.parse("1.11" )
@property
def _SCREAMING_SNAKE_CASE (self : Dict ) -> List[Any]:
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def _SCREAMING_SNAKE_CASE (self : int ) -> List[str]:
'''simple docstring'''
return 1e-4
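# Editorial usage sketch (the public class names in transformers are BeitConfig
# and BeitOnnxConfig):
# config = BeitConfig(image_size=384, use_relative_position_bias=True)
# onnx_config = BeitOnnxConfig(config)
# list(onnx_config.inputs)  # -> ["pixel_values"], with the batch on axis 0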
| 352 |
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def UpperCamelCase ( __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int ):
# prepare kernel
    # the kernel size has to be odd
if (ksize % 2) == 0:
snake_case : Tuple = ksize + 1
snake_case : int = np.zeros((ksize, ksize) , dtype=np.floataa )
# each value
for y in range(__lowerCamelCase ):
for x in range(__lowerCamelCase ):
# distance from center
snake_case : int = x - ksize // 2
snake_case : Union[str, Any] = y - ksize // 2
            # degrees to radians
snake_case : List[str] = theta / 180 * np.pi
snake_case : List[Any] = np.cos(_theta )
snake_case : Dict = np.sin(_theta )
# get kernel x
snake_case : Optional[int] = cos_theta * px + sin_theta * py
# get kernel y
snake_case : str = -sin_theta * px + cos_theta * py
# fill kernel
snake_case : Any = np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
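# Editorial note: the kernel above implements the real-valued Gabor function
#   g(x, y) = exp(-(x'**2 + gamma**2 * y'**2) / (2 * sigma**2)) * cos(2*pi*x'/lambd + psi)
# where (x', y') are the pixel offsets from the kernel center rotated by theta.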
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
__lowerCamelCase = imread("""../image_data/lena.jpg""")
    # convert the image to gray scale
__lowerCamelCase = cvtColor(img, COLOR_BGR2GRAY)
    # Apply multiple kernels to detect edges
__lowerCamelCase = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 1_20, 1_50]:
__lowerCamelCase = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
__lowerCamelCase = out / out.max() * 2_55
__lowerCamelCase = out.astype(np.uinta)
imshow("""Original""", gray)
imshow("""Gabor filter with 20x20 mask and 6 directions""", out)
waitKey(0)
| 10 | 0 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
__lowerCamelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
__lowerCamelCase = ''' def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
'''
class UpperCAmelCase ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Tuple:
'''simple docstring'''
snake_case : List[str] = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir , "models/bert/" ) )
snake_case : List[Any] = self.transformer_dir
shutil.copy(
os.path.join(snake_case__ , "src/transformers/models/bert/modeling_bert.py" ) , os.path.join(self.transformer_dir , "models/bert/modeling_bert.py" ) , )
def _SCREAMING_SNAKE_CASE (self : int ) -> Any:
'''simple docstring'''
snake_case : Optional[Any] = "src/transformers"
shutil.rmtree(self.transformer_dir )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : Any , snake_case__ : Any=None ) -> Dict:
'''simple docstring'''
snake_case : Optional[Any] = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
snake_case : str = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
snake_case : List[Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 )
snake_case : Dict = black.format_str(snake_case__ , mode=snake_case__ )
snake_case : Optional[int] = os.path.join(self.transformer_dir , "new_code.py" )
with open(snake_case__ , "w" , newline="\n" ) as f:
f.write(snake_case__ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(snake_case__ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=snake_case__ )
with open(snake_case__ , "r" ) as f:
self.assertTrue(f.read() , snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
snake_case : int = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead" )
self.assertEqual(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Tuple:
'''simple docstring'''
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead" , "BertLMPredictionHead" , REFERENCE_CODE + "\n" , )
# With no empty line at the end
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead" , "BertLMPredictionHead" , snake_case__ , )
# Copy consistency with rename
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel" , "TestModelLMPredictionHead" , re.sub("Bert" , "TestModel" , snake_case__ ) , )
# Copy consistency with a really long name
snake_case : Optional[int] = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
self.check_copy_consistency(
f"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}""" , f"""{long_class_name}LMPredictionHead""" , re.sub("Bert" , snake_case__ , snake_case__ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel" , "TestModelLMPredictionHead" , snake_case__ , overwrite_result=re.sub("Bert" , "TestModel" , snake_case__ ) , )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
snake_case : str = check_copies.LOCALIZED_READMES["README_zh-hans.md"]
snake_case : Union[str, Any] = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."
" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"
" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"
" method has been applied to compress GPT2 into"
" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
" Multilingual BERT into"
" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"
" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"
" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"
" Luong, Quoc V. Le, Christopher D. Manning."
)
snake_case : Optional[int] = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
snake_case : Union[str, Any] = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."
" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"
" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"
" method has been applied to compress GPT2 into"
" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
" Multilingual BERT into"
" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"
" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"
" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"
" Christopher D. Manning 发布。\n"
)
snake_case : Optional[Any] = check_copies.convert_to_localized_md(
snake_case__ , snake_case__ , localized_readme["format_model_list"] )
self.assertFalse(snake_case__ )
self.assertEqual(snake_case__ , snake_case__ )
snake_case : List[str] = check_copies.convert_to_localized_md(
snake_case__ , snake_case__ , localized_readme["format_model_list"] )
        # Check that the number of models matches the English README.md after conversion.
self.assertTrue(snake_case__ )
snake_case : Dict = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."
)
snake_case : str = (
"1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"
" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
snake_case : int = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
snake_case : Dict = check_copies.convert_to_localized_md(
snake_case__ , snake_case__ , localized_readme["format_model_list"] )
# Check if the model link is synchronized.
self.assertEqual(snake_case__ , snake_case__ )
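# Editorial note (sketch): check_copies.is_copy_consistent(filename) returns the
# list of "# Copied from ..." blocks that have drifted from their reference code
# (an empty list means the file is consistent); passing overwrite=True rewrites
# the drifted blocks in place, which is what the overwrite_result branch above
# exercises.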
| 353 |
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class UpperCAmelCase :
def __init__(self : Dict , snake_case__ : Any , snake_case__ : Tuple=99 , snake_case__ : Tuple=13 , snake_case__ : int=16 , snake_case__ : Tuple=7 , snake_case__ : Union[str, Any]=True , snake_case__ : int=True , snake_case__ : List[Any]=True , snake_case__ : Optional[Any]=False , snake_case__ : Optional[int]=True , snake_case__ : Any=2 , snake_case__ : List[Any]=32 , snake_case__ : List[str]=4 , snake_case__ : List[str]=4 , snake_case__ : int=30 , snake_case__ : int=0 , snake_case__ : Tuple=1 , snake_case__ : Optional[Any]=2 , snake_case__ : int=None , ) -> List[Any]:
'''simple docstring'''
snake_case : Optional[Any] = parent
snake_case : Any = batch_size
snake_case : Any = decoder_seq_length
# For common tests
snake_case : Any = self.decoder_seq_length
snake_case : Optional[int] = is_training
snake_case : List[str] = use_attention_mask
snake_case : Tuple = use_labels
snake_case : int = vocab_size
snake_case : Any = d_model
snake_case : Dict = d_model
snake_case : List[str] = decoder_layers
snake_case : Union[str, Any] = decoder_layers
snake_case : int = decoder_ffn_dim
snake_case : List[Any] = decoder_attention_heads
snake_case : Dict = decoder_attention_heads
snake_case : Optional[int] = eos_token_id
snake_case : Dict = bos_token_id
snake_case : List[str] = pad_token_id
snake_case : int = decoder_start_token_id
snake_case : List[Any] = use_cache
snake_case : List[str] = max_position_embeddings
snake_case : Dict = None
snake_case : Union[str, Any] = decoder_seq_length
snake_case : Union[str, Any] = 2
snake_case : Union[str, Any] = 1
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Optional[Any]:
'''simple docstring'''
snake_case : Dict = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
snake_case : List[str] = None
if self.use_attention_mask:
snake_case : Optional[int] = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
snake_case : Union[str, Any] = None
if self.use_labels:
snake_case : List[str] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
snake_case : Union[str, Any] = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : str , snake_case__ : Union[str, Any] , ) -> str:
'''simple docstring'''
snake_case : Optional[int] = True
snake_case : List[Any] = TrOCRDecoder(config=snake_case__ ).to(snake_case__ ).eval()
snake_case : Dict = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
snake_case : List[str] = model(snake_case__ , use_cache=snake_case__ )
snake_case : Any = model(snake_case__ )
snake_case : Any = model(snake_case__ , use_cache=snake_case__ )
self.parent.assertTrue(len(snake_case__ ) == len(snake_case__ ) )
self.parent.assertTrue(len(snake_case__ ) == len(snake_case__ ) + 1 )
snake_case : List[Any] = outputs["past_key_values"]
        # create a hypothetical next token and extend it to next_input_ids
snake_case : Optional[Any] = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
        # append the hypothetical token to input_ids
snake_case : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case : str = model(snake_case__ )["last_hidden_state"]
snake_case : str = model(snake_case__ , past_key_values=snake_case__ )["last_hidden_state"]
# select random slice
snake_case : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case : str = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
snake_case : Optional[Any] = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(snake_case__ , snake_case__ , atol=1e-3 )
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Tuple:
'''simple docstring'''
snake_case : List[Any] = self.prepare_config_and_inputs()
snake_case , snake_case , snake_case , snake_case : Dict = config_and_inputs
snake_case : List[Any] = {"input_ids": input_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( A_ ,A_ ,A_ ,unittest.TestCase ):
A__ : int = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
A__ : Union[str, Any] = (TrOCRForCausalLM,) if is_torch_available() else ()
A__ : int = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
A__ : int = True
A__ : Optional[Any] = False
def _SCREAMING_SNAKE_CASE (self : Any ) -> Optional[Any]:
'''simple docstring'''
snake_case : Optional[Any] = TrOCRStandaloneDecoderModelTester(self , is_training=snake_case__ )
snake_case : int = ConfigTester(self , config_class=snake_case__ )
def _SCREAMING_SNAKE_CASE (self : int ) -> Union[str, Any]:
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> List[Any]:
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Optional[Any]:
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE (self : Dict ) -> List[str]:
'''simple docstring'''
snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Dict ) -> Any:
'''simple docstring'''
return
@unittest.skip("The model doesn't support left padding" ) # and it's not used enough to be worth fixing :)
def _SCREAMING_SNAKE_CASE (self : Any ) -> Any:
'''simple docstring'''
pass
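# Editorial note (sketch): create_and_check_decoder_model_past above feeds the
# decoder once with the full sequence and once with cached past_key_values plus
# only the new token, then asserts that the two last_hidden_state slices match
# within atol=1e-3, i.e. the decoder cache is consistent with full recomputation.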
| 10 | 0 |
def UpperCamelCase ( __lowerCamelCase : int ):
return 1 if digit in (0, 1) else (digit * factorial(digit - 1 ))
def UpperCamelCase ( __lowerCamelCase : int ):
snake_case : int = 0
snake_case : Dict = number
while duplicate > 0:
snake_case , snake_case : List[str] = divmod(__lowerCamelCase , 10 )
fact_sum += factorial(__lowerCamelCase )
return fact_sum == number
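# Worked example (editorial note): 145 -> 1! + 4! + 5! = 1 + 24 + 120 = 145, so
# 145 is a Krishnamurthy (factorion) number; in base 10 the only such numbers
# are 1, 2, 145 and 40585.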
if __name__ == "__main__":
print("""Program to check whether a number is a Krisnamurthy Number or not.""")
__lowerCamelCase = int(input("""Enter number: """).strip())
print(
F'{number} is {"" if krishnamurthy(number) else "not "}a Krishnamurthy Number.'
)
| 354 |
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
__lowerCamelCase = ["""text""", """image""", """audio"""]
def UpperCamelCase ( __lowerCamelCase : List[str] ):
snake_case : str = []
for input_type in input_types:
if input_type == "text":
inputs.append("Text input" )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png" ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3000 ) )
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
inputs.append(create_inputs(__lowerCamelCase ) )
else:
raise ValueError(f"""Invalid type requested: {input_type}""" )
return inputs
def UpperCamelCase ( __lowerCamelCase : List ):
snake_case : List[str] = []
for output in outputs:
if isinstance(__lowerCamelCase , (str, AgentText) ):
output_types.append("text" )
elif isinstance(__lowerCamelCase , (Image.Image, AgentImage) ):
output_types.append("image" )
elif isinstance(__lowerCamelCase , (torch.Tensor, AgentAudio) ):
output_types.append("audio" )
else:
raise ValueError(f"""Invalid output: {output}""" )
return output_types
@is_tool_test
class UpperCAmelCase :
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> List[str]:
'''simple docstring'''
self.assertTrue(hasattr(self.tool , "inputs" ) )
self.assertTrue(hasattr(self.tool , "outputs" ) )
snake_case : List[Any] = self.tool.inputs
for _input in inputs:
if isinstance(_input , snake_case__ ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
snake_case : str = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
snake_case : List[str] = create_inputs(self.tool.inputs )
snake_case : Dict = self.tool(*snake_case__ )
# There is a single output
if len(self.tool.outputs ) == 1:
snake_case : List[Any] = [outputs]
self.assertListEqual(output_types(snake_case__ ) , self.tool.outputs )
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> List[Any]:
'''simple docstring'''
self.assertTrue(hasattr(self.tool , "description" ) )
self.assertTrue(hasattr(self.tool , "default_checkpoint" ) )
self.assertTrue(self.tool.description.startswith("This is a tool that" ) )
def _SCREAMING_SNAKE_CASE (self : int ) -> Union[str, Any]:
'''simple docstring'''
snake_case : str = create_inputs(self.tool.inputs )
snake_case : int = self.tool(*snake_case__ )
if not isinstance(snake_case__ , snake_case__ ):
snake_case : Optional[Any] = [outputs]
self.assertEqual(len(snake_case__ ) , len(self.tool.outputs ) )
for output, output_type in zip(snake_case__ , self.tool.outputs ):
snake_case : Any = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(snake_case__ , snake_case__ ) )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
snake_case : List[Any] = create_inputs(self.tool.inputs )
snake_case : str = []
for _input, input_type in zip(snake_case__ , self.tool.inputs ):
if isinstance(snake_case__ , snake_case__ ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
snake_case : Optional[int] = self.tool(*snake_case__ )
if not isinstance(snake_case__ , snake_case__ ):
snake_case : List[str] = [outputs]
self.assertEqual(len(snake_case__ ) , len(self.tool.outputs ) )
| 10 | 0 |
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
    def test_text_streamer_matches_non_streaming (self ):
        '''simple docstring'''
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(torch_device )
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(torch_device )
        greedy_ids = model.generate(input_ids , max_new_tokens=10 , do_sample=False )
        greedy_text = tokenizer.decode(greedy_ids[0] )
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer )
            model.generate(input_ids , max_new_tokens=10 , do_sample=False , streamer=streamer )
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]
        self.assertEqual(streamer_text , greedy_text )
    def test_iterator_streamer_matches_non_streaming (self ):
        '''simple docstring'''
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(torch_device )
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(torch_device )
        greedy_ids = model.generate(input_ids , max_new_tokens=10 , do_sample=False )
        greedy_text = tokenizer.decode(greedy_ids[0] )
        streamer = TextIteratorStreamer(tokenizer )
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate , kwargs=generation_kwargs )
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text
        self.assertEqual(streamer_text , greedy_text )
    def test_text_streamer_skip_prompt (self ):
        '''simple docstring'''
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(torch_device )
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(torch_device )
        greedy_ids = model.generate(input_ids , max_new_tokens=10 , do_sample=False )
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0] )
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer , skip_prompt=True )
            model.generate(input_ids , max_new_tokens=10 , do_sample=False , streamer=streamer )
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]
        self.assertEqual(streamer_text , new_greedy_text )
    def test_text_streamer_decode_kwargs (self ):
        '''simple docstring'''
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2" )
        model = AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(torch_device )
        model.config.eos_token_id = -1
        input_ids = torch.ones((1, 5) , device=torch_device ).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer , skip_special_tokens=True )
            model.generate(input_ids , max_new_tokens=1 , do_sample=False , streamer=streamer )
        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text , return_tensors="pt" )
        self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
    def test_iterator_streamer_timeout (self ):
        '''simple docstring'''
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(torch_device )
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(torch_device )
        streamer = TextIteratorStreamer(tokenizer , timeout=0.001 )
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate , kwargs=generation_kwargs )
        thread.start()
        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty ):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
| 355 |
def UpperCamelCase ( string_a : str , string_b : str ):
    """Return the Hamming distance between two equal-length strings."""
    if len(string_a ) != len(string_b ):
        raise ValueError("String lengths must match!" )
    count = 0
    for char_a, char_b in zip(string_a , string_b ):
        if char_a != char_b:
            count += 1
    return count
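# Illustrative values, following directly from the definition above:
# UpperCamelCase("karolin", "kathrin") == 3
# UpperCamelCase("00000", "11111") == 5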
if __name__ == "__main__":
import doctest
doctest.testmod()
| 10 | 0 |
from manim import *
class UpperCAmelCase ( Scene ):
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> int:
'''simple docstring'''
snake_case : str = Rectangle(height=0.5 , width=0.5 )
snake_case : str = Rectangle(height=0.25 , width=0.25 )
snake_case : str = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
snake_case : str = [mem.copy() for i in range(6 )]
snake_case : Any = [mem.copy() for i in range(6 )]
snake_case : Tuple = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
snake_case : Any = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
snake_case : List[str] = VGroup(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
snake_case : Tuple = Text("CPU" , font_size=24 )
snake_case : Any = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCamelCase__ )
snake_case : int = [mem.copy() for i in range(4 )]
snake_case : List[str] = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
snake_case : List[Any] = Text("GPU" , font_size=24 )
snake_case : Union[str, Any] = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ )
gpu.move_to([-1, -1, 0] )
self.add(lowerCamelCase__ )
snake_case : Any = [mem.copy() for i in range(6 )]
snake_case : Optional[Any] = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
snake_case : List[Any] = Text("Model" , font_size=24 )
snake_case : List[str] = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ )
model.move_to([3, -1.0, 0] )
self.add(lowerCamelCase__ )
snake_case : Optional[int] = []
snake_case : Tuple = []
snake_case : Optional[int] = []
for i, rect in enumerate(lowerCamelCase__ ):
rect.set_stroke(lowerCamelCase__ )
snake_case : Dict = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowerCamelCase__ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowerCamelCase__ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=lowerCamelCase__ , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=lowerCamelCase__ , buff=0.0 )
self.add(lowerCamelCase__ )
model_cpu_arr.append(lowerCamelCase__ )
self.add(*lowerCamelCase__ , *lowerCamelCase__ , *lowerCamelCase__ )
snake_case : str = [mem.copy() for i in range(6 )]
snake_case : List[Any] = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
snake_case : Dict = Text("Loaded Checkpoint" , font_size=24 )
snake_case : Optional[Any] = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ )
checkpoint.move_to([3, 0.5, 0] )
self.add(lowerCamelCase__ )
snake_case : str = []
snake_case : List[Any] = []
for i, rect in enumerate(lowerCamelCase__ ):
snake_case : str = fill.copy().set_fill(lowerCamelCase__ , opacity=0.7 )
target.move_to(lowerCamelCase__ )
ckpt_arr.append(lowerCamelCase__ )
snake_case : List[Any] = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(lowerCamelCase__ )
self.add(*lowerCamelCase__ , *lowerCamelCase__ )
snake_case : Tuple = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
snake_case : Any = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowerCamelCase__ , lowerCamelCase__ )
snake_case : Union[str, Any] = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(lowerCamelCase__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(lowerCamelCase__ )
snake_case : List[Any] = MarkupText(
f"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
snake_case : List[Any] = [meta_mem.copy() for i in range(6 )]
snake_case : Optional[int] = [meta_mem.copy() for i in range(6 )]
snake_case : Dict = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
snake_case : List[str] = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
snake_case : Dict = VGroup(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
snake_case : str = Text("Disk" , font_size=24 )
snake_case : str = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(lowerCamelCase__ , run_time=3 ) , Write(lowerCamelCase__ , run_time=1 ) , Create(lowerCamelCase__ , run_time=1 ) )
snake_case : str = []
for i, rect in enumerate(lowerCamelCase__ ):
snake_case : Dict = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(lowerCamelCase__ , run_time=1.5 ) )
self.play(*lowerCamelCase__ )
self.play(FadeOut(lowerCamelCase__ ) )
snake_case : Union[str, Any] = MarkupText(f"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase__ , run_time=3 ) )
self.play(
FadeOut(lowerCamelCase__ , lowerCamelCase__ , *lowerCamelCase__ , *lowerCamelCase__ ) , )
self.wait()
| 356 |
def UpperCamelCase ( num : int ):
    if not isinstance(num , int ):
        raise TypeError("only integers accepted as input" )
    else:
        num_str = str(abs(num ) )
        num_transpositions = [list(num_str ) for char in range(len(num_str ) )]
        for index in range(len(num_str ) ):
            num_transpositions[index].pop(index )
        return max(
            int("".join(list(transposition ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 10 | 0 |
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 357 |
import requests
from bs4 import BeautifulSoup
def UpperCamelCase ( __lowerCamelCase : str = "AAPL" ):
snake_case : List[Any] = f"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
snake_case : Tuple = BeautifulSoup(requests.get(__lowerCamelCase ).text , "html.parser" )
snake_case : Dict = "My(6px) Pos(r) smartphone_Mt(6px)"
return soup.find("div" , class_=class_ ).find("span" ).text
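# Note: this scrapes Yahoo Finance's rendered HTML, so the CSS class above is brittle;
# if Yahoo changes its markup, soup.find(...) returns None and the call raises AttributeError.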
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F'Current {symbol:<4} stock price is {stock_price(symbol):>8}')
| 10 | 0 |
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
__lowerCamelCase = """\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n"""
__lowerCamelCase = """\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n"""
__lowerCamelCase = """\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for \'record\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'prediction_text\': the predicted answer text\n - for \'multirc\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question-answer pair as specified by the dataset\n - \'prediction\': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for \'record\': list of question-answers dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'answers\': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for \'record\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1\': F1 score\n - for \'multirc\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1_m\': Per-question macro-F1 score\n - \'f1_a\': Average F1 score over all answers\n - for \'axb\':\n \'matthews_correlation\': Matthew Correlation\n - for \'cb\':\n - \'accuracy\': Accuracy\n - \'f1\': F1 score\n - for all others:\n - \'accuracy\': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')\n >>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]\n >>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')\n >>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n"""
def simple_accuracy ( preds , labels ):
    return float((preds == labels).mean() )
def acc_and_fa ( preds , labels , fa_avg="binary" ):
    acc = simple_accuracy(preds , labels )
    fa = float(fa_score(y_true=labels , y_pred=preds , average=fa_avg ) )
    return {
        "accuracy": acc,
        "f1": fa,
    }
def evaluate_multirc ( ids_preds , labels ):
    question_map = {}
    for id_pred, label in zip(ids_preds , labels ):
        question_id = f"""{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"""
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label) )
        else:
            question_map[question_id] = [(pred, label)]
    fas , ems = [], []
    for question, preds_labels in question_map.items():
        question_preds , question_labels = zip(*preds_labels )
        fa = fa_score(y_true=question_labels , y_pred=question_preds , average="macro" )
        fas.append(fa )
        em = int(sum(pred == label for pred, label in preds_labels ) == len(preds_labels ) )
        ems.append(em )
    fa_m = float(sum(fas ) / len(fas ) )
    em = sum(ems ) / len(ems )
    fa_a = float(fa_score(y_true=labels , y_pred=[id_pred["prediction"] for id_pred in ids_preds] ) )
    return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class UpperCAmelCase ( datasets.Metric ):
    def _info (self ):
'''simple docstring'''
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None , )
    def _get_feature_types (self ):
'''simple docstring'''
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("int64" ),
"query": datasets.Value("int64" ),
},
"prediction_text": datasets.Value("string" ),
},
"references": {
"idx": {
"passage": datasets.Value("int64" ),
"query": datasets.Value("int64" ),
},
"answers": datasets.Sequence(datasets.Value("string" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("int64" ),
"paragraph": datasets.Value("int64" ),
"question": datasets.Value("int64" ),
},
"prediction": datasets.Value("int64" ),
},
"references": datasets.Value("int64" ),
}
else:
return {
"predictions": datasets.Value("int64" ),
"references": datasets.Value("int64" ),
}
    def _compute (self , predictions , references ):
        '''simple docstring'''
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references , predictions )}
        elif self.config_name == "cb":
            return acc_and_fa(predictions , references , fa_avg="macro" )
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset , predictions )[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions , references )
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions , references )}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                "[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]" )
| 358 |
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + """/test_data/fsmt/fsmt_val_data.json"""
with io.open(filename, """r""", encoding="""utf-8""") as f:
    bleu_data = json.load(f)
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
    def get_tokenizer (self , mname ):
        '''simple docstring'''
        return FSMTTokenizer.from_pretrained(mname )
    def get_model (self , mname ):
        '''simple docstring'''
        model = FSMTForConditionalGeneration.from_pretrained(mname ).to(torch_device )
        if torch_device == "cuda":
            model.half()
        return model
@parameterized.expand(
[
["en-ru", 26.0],
["ru-en", 22.0],
["en-de", 22.0],
["de-en", 29.0],
] )
@slow
    def test_bleu_scores (self , pair , min_bleu_score ):
        '''simple docstring'''
        mname = f"""facebook/wmt19-{pair}"""
        tokenizer = self.get_tokenizer(mname )
        model = self.get_model(mname )
        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]
        batch = tokenizer(src_sentences , return_tensors="pt" , truncation=True , padding="longest" ).to(torch_device )
        outputs = model.generate(
            input_ids=batch.input_ids , num_beams=8 , )
        decoded_sentences = tokenizer.batch_decode(
            outputs , skip_special_tokens=True , clean_up_tokenization_spaces=False )
        scores = calculate_bleu(decoded_sentences , tgt_sentences )
        print(scores )
        self.assertGreaterEqual(scores["bleu"] , min_bleu_score )
| 10 | 0 |
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
__lowerCamelCase = "base_with_context"
def load_notes_encoder ( weights , model ):
snake_case : Optional[Any] = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"] ) )
    snake_case : Any = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=False )
for lyr_num, lyr in enumerate(model.encoders ):
snake_case : List[Any] = weights[f"""layers_{lyr_num}"""]
snake_case : Tuple = nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
snake_case : Optional[Any] = ly_weight["""attention"""]
snake_case : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
snake_case : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
snake_case : Tuple = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
snake_case : Any = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
snake_case : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
snake_case : int = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
snake_case : int = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
snake_case : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
snake_case : Optional[int] = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
return model
def load_continuous_encoder ( weights , model ):
snake_case : Tuple = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T ) )
    snake_case : Optional[int] = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=False )
for lyr_num, lyr in enumerate(model.encoders ):
snake_case : int = weights[f"""layers_{lyr_num}"""]
snake_case : List[str] = ly_weight["""attention"""]
snake_case : Any = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
snake_case : Tuple = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
snake_case : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
snake_case : Any = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
snake_case : str = nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
snake_case : Dict = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
snake_case : Dict = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
snake_case : List[str] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
snake_case : Tuple = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
snake_case : Optional[Any] = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
return model
def load_decoder ( weights , model ):
snake_case : List[Any] = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T ) )
snake_case : Tuple = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T ) )
    snake_case : Optional[int] = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=False )
snake_case : Union[str, Any] = nn.Parameter(
torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
snake_case : Dict = weights[f"""layers_{lyr_num}"""]
snake_case : Any = nn.Parameter(
torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"] ) )
snake_case : Dict = nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T ) )
snake_case : Union[str, Any] = ly_weight["""self_attention"""]
snake_case : int = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
snake_case : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
snake_case : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
snake_case : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
snake_case : Union[str, Any] = ly_weight["""MultiHeadDotProductAttention_0"""]
snake_case : str = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
snake_case : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
snake_case : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
snake_case : str = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
snake_case : Optional[int] = nn.Parameter(
torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"] ) )
snake_case : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
snake_case : Any = nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T ) )
snake_case : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
snake_case : List[str] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
snake_case : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
snake_case : str = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"] ) )
snake_case : Optional[Any] = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T ) )
return model
def main ( args ):
    ta_checkpoint = checkpoints.load_tax_checkpoint(args.checkpoint_path )
    ta_checkpoint = jnp.tree_util.tree_map(onp.array , ta_checkpoint )
    gin_overrides = [
"""from __gin__ import dynamic_registration""",
"""from music_spectrogram_diffusion.models.diffusion import diffusion_utils""",
"""diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0""",
"""diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()""",
]
    gin_file = os.path.join(args.checkpoint_path , ".." , "config.gin" )
    gin_config = inference.parse_training_gin_file(gin_file , gin_overrides )
    synth_model = inference.InferenceModel(args.checkpoint_path , gin_config )
    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2" , variance_type="fixed_large" )
    notes_encoder = SpectrogramNotesEncoder(
max_length=synth_model.sequence_length["inputs"] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="gated-gelu" , )
    continuous_encoder = SpectrogramContEncoder(
input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length["targets_context"] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="gated-gelu" , )
    decoder = TaFilmDecoder(
input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length["targets_context"] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
    notes_encoder = load_notes_encoder(ta_checkpoint["target"]["token_encoder"] , notes_encoder )
    continuous_encoder = load_continuous_encoder(ta_checkpoint["target"]["continuous_encoder"] , continuous_encoder )
    decoder = load_decoder(ta_checkpoint["target"]["decoder"] , decoder )
    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder" )
    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder , continuous_encoder=continuous_encoder , decoder=decoder , scheduler=scheduler , melgan=melgan , )
if args.save:
pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
parser.add_argument("""--output_path""", default=None, type=str, required=True, help="""Path to the converted model.""")
parser.add_argument(
"""--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not."""
)
parser.add_argument(
"""--checkpoint_path""",
default=F'{MODEL}/checkpoint_500000',
type=str,
required=False,
help="""Path to the original jax model checkpoint.""",
)
__lowerCamelCase = parser.parse_args()
main(args)
| 359 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
"""return_dict""": False,
"""output_hidden_states""": True,
"""output_attentions""": True,
"""torchscript""": True,
"""torch_dtype""": """float16""",
"""use_bfloat16""": True,
"""tf_legacy_loss""": True,
"""pruned_heads""": {"""a""": 1},
"""tie_word_embeddings""": False,
"""is_decoder""": True,
"""cross_attention_hidden_size""": 1_28,
"""add_cross_attention""": True,
"""tie_encoder_decoder""": True,
"""max_length""": 50,
"""min_length""": 3,
"""do_sample""": True,
"""early_stopping""": True,
"""num_beams""": 3,
"""num_beam_groups""": 3,
"""diversity_penalty""": 0.5,
"""temperature""": 2.0,
"""top_k""": 10,
"""top_p""": 0.7,
"""typical_p""": 0.2,
"""repetition_penalty""": 0.8,
"""length_penalty""": 0.8,
"""no_repeat_ngram_size""": 5,
"""encoder_no_repeat_ngram_size""": 5,
"""bad_words_ids""": [1, 2, 3],
"""num_return_sequences""": 3,
"""chunk_size_feed_forward""": 5,
"""output_scores""": True,
"""return_dict_in_generate""": True,
"""forced_bos_token_id""": 2,
"""forced_eos_token_id""": 3,
"""remove_invalid_values""": True,
"""architectures""": ["""BertModel"""],
"""finetuning_task""": """translation""",
"""id2label""": {0: """label"""},
"""label2id""": {"""label""": """0"""},
"""tokenizer_class""": """BertTokenizerFast""",
"""prefix""": """prefix""",
"""bos_token_id""": 6,
"""pad_token_id""": 7,
"""eos_token_id""": 8,
"""sep_token_id""": 9,
"""decoder_start_token_id""": 10,
"""exponential_decay_length_penalty""": (5, 1.01),
"""suppress_tokens""": [0, 1],
"""begin_suppress_tokens""": 2,
"""task_specific_params""": {"""translation""": """some_params"""},
"""problem_type""": """regression""",
}
@is_staging_test
class UpperCAmelCase ( unittest.TestCase ):
@classmethod
def _SCREAMING_SNAKE_CASE (cls : Dict ) -> Optional[int]:
'''simple docstring'''
snake_case : Any = TOKEN
HfFolder.save_token(snake_case__ )
@classmethod
def _SCREAMING_SNAKE_CASE (cls : Dict ) -> Union[str, Any]:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id="test-config" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-config-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-config" )
except HTTPError:
pass
def _SCREAMING_SNAKE_CASE (self : str ) -> List[str]:
'''simple docstring'''
snake_case : Union[str, Any] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("test-config" , use_auth_token=self._token )
snake_case : Union[str, Any] = BertConfig.from_pretrained(f"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(snake_case__ , getattr(snake_case__ , snake_case__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-config" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(snake_case__ , repo_id="test-config" , push_to_hub=snake_case__ , use_auth_token=self._token )
snake_case : Any = BertConfig.from_pretrained(f"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(snake_case__ , getattr(snake_case__ , snake_case__ ) )
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Dict:
'''simple docstring'''
snake_case : List[Any] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("valid_org/test-config-org" , use_auth_token=self._token )
snake_case : Optional[int] = BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(snake_case__ , getattr(snake_case__ , snake_case__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-config-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
snake_case__ , repo_id="valid_org/test-config-org" , push_to_hub=snake_case__ , use_auth_token=self._token )
snake_case : str = BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(snake_case__ , getattr(snake_case__ , snake_case__ ) )
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Dict:
'''simple docstring'''
CustomConfig.register_for_auto_class()
snake_case : Union[str, Any] = CustomConfig(attribute=42 )
config.push_to_hub("test-dynamic-config" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {"AutoConfig": "custom_configuration.CustomConfig"} )
snake_case : int = AutoConfig.from_pretrained(f"""{USER}/test-dynamic-config""" , trust_remote_code=snake_case__ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , "CustomConfig" )
self.assertEqual(new_config.attribute , 42 )
class UpperCAmelCase ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Dict:
'''simple docstring'''
snake_case : Any = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
snake_case : Tuple = c.n_embd + 1 # int
snake_case : str = c.resid_pdrop + 1.0 # float
snake_case : Optional[Any] = not c.scale_attn_weights # bool
snake_case : Optional[int] = c.summary_type + "foo" # str
c.update_from_string(
f"""n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}""" )
self.assertEqual(snake_case__ , c.n_embd , "mismatch for key: n_embd" )
self.assertEqual(snake_case__ , c.resid_pdrop , "mismatch for key: resid_pdrop" )
self.assertEqual(snake_case__ , c.scale_attn_weights , "mismatch for key: scale_attn_weights" )
self.assertEqual(snake_case__ , c.summary_type , "mismatch for key: summary_type" )
def _SCREAMING_SNAKE_CASE (self : int ) -> List[str]:
'''simple docstring'''
snake_case : Tuple = PretrainedConfig()
snake_case : List[str] = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to addin config_common_kwargs above.
self.assertListEqual(
snake_case__ , ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"] )
snake_case : Dict = [key for key, value in config_common_kwargs.items() if value == getattr(snake_case__ , snake_case__ )]
if len(snake_case__ ) > 0:
raise ValueError(
"The following keys are set with the default values in"
" `test_configuration_common.config_common_kwargs` pick another value for them:"
f""" {', '.join(snake_case__ )}.""" )
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
with self.assertRaises(snake_case__ ):
# config is in subfolder, the following should not work without specifying the subfolder
snake_case : Optional[Any] = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" )
snake_case : Optional[Any] = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" , subfolder="bert" )
self.assertIsNotNone(snake_case__ )
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Optional[Any]:
'''simple docstring'''
snake_case : Tuple = mock.Mock()
snake_case : Optional[int] = 5_00
snake_case : Any = {}
snake_case : str = HTTPError
snake_case : Tuple = {}
# Download this model to make sure it's in the cache.
snake_case : List[Any] = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=snake_case__ ) as mock_head:
snake_case : List[str] = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
# This check we did call the fake head request
mock_head.assert_called()
def _SCREAMING_SNAKE_CASE (self : Any ) -> List[Any]:
'''simple docstring'''
snake_case : Dict = BertConfig.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json" )
def _SCREAMING_SNAKE_CASE (self : int ) -> str:
'''simple docstring'''
snake_case : Optional[Any] = AutoConfig.from_pretrained("bert-base-cased" )
snake_case : int = ["config.4.0.0.json"]
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(snake_case__ )
snake_case : str = 2
json.dump(configuration.to_dict() , open(os.path.join(snake_case__ , "config.4.0.0.json" ) , "w" ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
snake_case : str = AutoConfig.from_pretrained(snake_case__ )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
snake_case : List[str] = ["config.42.0.0.json"]
snake_case : Optional[int] = 7_68
configuration.save_pretrained(snake_case__ )
shutil.move(os.path.join(snake_case__ , "config.4.0.0.json" ) , os.path.join(snake_case__ , "config.42.0.0.json" ) )
snake_case : Union[str, Any] = AutoConfig.from_pretrained(snake_case__ )
self.assertEqual(new_configuration.hidden_size , 7_68 )
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Tuple:
'''simple docstring'''
snake_case : List[Any] = "hf-internal-testing/test-two-configs"
import transformers as new_transformers
snake_case : Optional[int] = "v4.0.0"
snake_case , snake_case : List[str] = new_transformers.models.auto.AutoConfig.from_pretrained(
snake_case__ , return_unused_kwargs=snake_case__ )
self.assertEqual(new_configuration.hidden_size , 2 )
# This checks `_configuration_file` ia not kept in the kwargs by mistake.
self.assertDictEqual(snake_case__ , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
snake_case : int = "v3.0.0"
snake_case : int = old_transformers.models.auto.AutoConfig.from_pretrained(snake_case__ )
self.assertEqual(old_configuration.hidden_size , 7_68 )
| 10 | 0 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def encode_line ( tokenizer , line , max_length , padding_side , pad_to_max_length=True , return_tensors="pt" ):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer , BartTokenizer ) and not line.startswith(" " ) else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line] , max_length=max_length , padding="max_length" if pad_to_max_length else None , truncation=True , return_tensors=return_tensors , add_special_tokens=True , **extra_kw , )
def trim_batch ( input_ids , pad_token_id , attention_mask=None , ):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id ).any(dim=0 )
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
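# Illustrative call: columns that are padding in every row are dropped, e.g. with pad_token_id=0,
# trim_batch(torch.tensor([[1, 2, 0], [3, 0, 0]]), 0) -> tensor([[1, 2], [3, 0]])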
class UpperCAmelCase ( Dataset ):
def __init__(self : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Dict , snake_case__ : Tuple="train" , snake_case__ : Tuple=None , snake_case__ : Dict=None , snake_case__ : Dict=None , snake_case__ : Optional[int]="" , ) -> Any:
'''simple docstring'''
super().__init__()
snake_case : Union[str, Any] = Path(lowerCamelCase_ ).joinpath(type_path + ".source" )
snake_case : Optional[int] = Path(lowerCamelCase_ ).joinpath(type_path + ".target" )
snake_case : Union[str, Any] = self.get_char_lens(self.src_file )
snake_case : Dict = max_source_length
snake_case : List[Any] = max_target_length
assert min(self.src_lens ) > 0, f"""found empty line in {self.src_file}"""
snake_case : Optional[int] = tokenizer
snake_case : List[Any] = prefix
if n_obs is not None:
snake_case : Optional[int] = self.src_lens[:n_obs]
snake_case : Any = src_lang
snake_case : Union[str, Any] = tgt_lang
def __len__(self : List[Any] ) -> List[str]:
'''simple docstring'''
return len(self.src_lens )
def __getitem__(self : Any , snake_case__ : Union[str, Any] ) -> Dict[str, torch.Tensor]:
'''simple docstring'''
snake_case : Optional[Any] = index + 1 # linecache starts at 1
snake_case : str = self.prefix + linecache.getline(str(self.src_file ) , lowerCamelCase_ ).rstrip("\n" )
snake_case : int = linecache.getline(str(self.tgt_file ) , lowerCamelCase_ ).rstrip("\n" )
assert source_line, f"""empty source line for index {index}"""
assert tgt_line, f"""empty tgt line for index {index}"""
# Need to add eos token manually for T5
if isinstance(self.tokenizer , lowerCamelCase_ ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
snake_case : Union[str, Any] = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , lowerCamelCase_ ) else self.tokenizer
)
snake_case : Optional[int] = self.tokenizer.generator if isinstance(self.tokenizer , lowerCamelCase_ ) else self.tokenizer
snake_case : int = encode_line(lowerCamelCase_ , lowerCamelCase_ , self.max_source_length , "right" )
snake_case : Tuple = encode_line(lowerCamelCase_ , lowerCamelCase_ , self.max_target_length , "right" )
snake_case : Tuple = source_inputs["input_ids"].squeeze()
snake_case : Tuple = target_inputs["input_ids"].squeeze()
snake_case : Optional[int] = source_inputs["attention_mask"].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def _SCREAMING_SNAKE_CASE (snake_case__ : Dict ) -> List[str]:
'''simple docstring'''
return [len(lowerCamelCase_ ) for x in Path(lowerCamelCase_ ).open().readlines()]
def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : int ) -> Dict[str, torch.Tensor]:
'''simple docstring'''
snake_case : List[str] = torch.stack([x["input_ids"] for x in batch] )
snake_case : Tuple = torch.stack([x["attention_mask"] for x in batch] )
snake_case : Tuple = torch.stack([x["decoder_input_ids"] for x in batch] )
snake_case : Dict = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , lowerCamelCase_ )
else self.tokenizer.pad_token_id
)
snake_case : Any = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , lowerCamelCase_ )
else self.tokenizer.pad_token_id
)
snake_case : Tuple = trim_batch(lowerCamelCase_ , lowerCamelCase_ )
snake_case , snake_case : int = trim_batch(lowerCamelCase_ , lowerCamelCase_ , attention_mask=lowerCamelCase_ )
snake_case : Tuple = {
"input_ids": source_ids,
"attention_mask": source_mask,
"decoder_input_ids": y,
}
return batch
logger = getLogger(__name__)
def flatten_list ( summary_ids : List[List] ):
    return list(itertools.chain.from_iterable(summary_ids ) )
def save_git_info ( folder_path : str ) -> None:
    repo_infos = get_git_info()
    save_json(repo_infos , os.path.join(folder_path , "git_log.json" ) )
def save_json ( content , path , indent=4 , **json_dump_kwargs ):
    with open(path , "w" ) as f:
        json.dump(content , f , indent=indent , **json_dump_kwargs )
def load_json ( path ):
    with open(path ) as f:
        return json.load(f )
def get_git_info ():
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        "repo_id": str(repo ),
        "repo_sha": str(repo.head.object.hexsha ),
        "repo_branch": str(repo.active_branch ),
        "hostname": str(socket.gethostname() ),
    }
    return repo_infos
def lmap ( f : Callable , x : Iterable ) -> List:
    return list(map(f , x ) )
def pickle_save ( obj , path ):
    with open(path , "wb" ) as f:
        return pickle.dump(obj , f )
def normalize_answer ( s : str ):
    def remove_articles (text ):
        return re.sub(r"\b(a|an|the)\b" , " " , text )
    def white_space_fix (text ):
        return " ".join(text.split() )
    def remove_punc (text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )
    def lower (text ):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s ) ) ) )
def f1_score ( prediction : str , ground_truth : str ):
    prediction_tokens = normalize_answer(prediction ).split()
    ground_truth_tokens = normalize_answer(ground_truth ).split()
    common = Counter(prediction_tokens ) & Counter(ground_truth_tokens )
    num_same = sum(common.values() )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens )
    recall = 1.0 * num_same / len(ground_truth_tokens )
    fa = (2 * precision * recall) / (precision + recall)
    return fa
def exact_match_score ( prediction , ground_truth ):
    return normalize_answer(prediction ) == normalize_answer(ground_truth )
def calculate_exact_match ( output_lns : List[str] , reference_lns : List[str] ):
    assert len(output_lns ) == len(reference_lns )
    em = 0
    for hypo, pred in zip(output_lns , reference_lns ):
        em += exact_match_score(hypo , pred )
    if len(output_lns ) > 0:
        em /= len(output_lns )
    return {"em": em}
def is_rag_model ( model_prefix ):
    return model_prefix.startswith("rag" )
def set_extra_model_params ( extra_params , hparams , config ):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams , p , None ):
            if not hasattr(config , p ) and not hasattr(config , equivalent_param[p] ):
                logger.info("config doesn't have a `{}` attribute".format(p ) )
                delattr(hparams , p )
                continue
            set_p = p if hasattr(config , p ) else equivalent_param[p]
            setattr(config , set_p , getattr(hparams , p ) )
            delattr(hparams , p )
    return hparams, config
| 360 |
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8
KEYMAP = {
"""tab""": ord("""\t"""),
"""newline""": ord("""\r"""),
"""esc""": 27,
"""up""": 65 + ARROW_KEY_FLAG,
"""down""": 66 + ARROW_KEY_FLAG,
"""right""": 67 + ARROW_KEY_FLAG,
"""left""": 68 + ARROW_KEY_FLAG,
"""mod_int""": 91,
"""undefined""": sys.maxsize,
"""interrupt""": 3,
"""insert""": 50,
"""delete""": 51,
"""pg_up""": 53,
"""pg_down""": 54,
}
__lowerCamelCase = KEYMAP["""up"""]
__lowerCamelCase = KEYMAP["""left"""]
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
B"""\xe0H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
B"""\x00H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
B"""\xe0P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
B"""\x00P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
B"""\xe0M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
B"""\x00M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
B"""\xe0K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
B"""\x00K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
}
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars ():
    if os.name == "nt":
        import msvcrt
        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER ) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha] )
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"] ) )
                    WIN_CH_BUFFER.append(chx )
                    if ord(chx ) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126 ) )
                    ch = chr(KEYMAP["esc"] )
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding )
        else:
            ch = WIN_CH_BUFFER.pop(0 )
    elif os.name == "posix":
        import termios
        import tty
        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd )
        try:
            tty.setraw(fd )
            ch = sys.stdin.read(1 )
        finally:
            termios.tcsetattr(fd , termios.TCSADRAIN , old_settings )
    return ch
def get_character ():
    char = get_raw_chars()
    if ord(char ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char ) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo ) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key ) + ARROW_KEY_FLAG )
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
| 10 | 0 |
"""simple docstring"""
import requests
__lowerCamelCase = """""" # <-- Put your OpenWeatherMap appid here!
__lowerCamelCase = """https://api.openweathermap.org/data/2.5/"""
def UpperCamelCase ( __lowerCamelCase : Any = "Chicago" , __lowerCamelCase : Union[str, Any] = APPID ):
return requests.get(URL_BASE + "weather" , params=locals() ).json()
def UpperCamelCase ( __lowerCamelCase : Dict = "Kolkata, India" , __lowerCamelCase : Optional[int] = APPID ):
return requests.get(URL_BASE + "forecast" , params=locals() ).json()
def UpperCamelCase ( __lowerCamelCase : int = 55.68 , __lowerCamelCase : Union[str, Any] = 12.57 , __lowerCamelCase : Union[str, Any] = APPID ):
return requests.get(URL_BASE + "onecall" , params=locals() ).json()
if __name__ == "__main__":
from pprint import pprint
while True:
__lowerCamelCase = input("""Enter a location:""").strip()
if location:
pprint(current_weather(location))
else:
break
| 361 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
__lowerCamelCase = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ["""DPTFeatureExtractor"""]
__lowerCamelCase = ["""DPTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_dpt"""] = [
        """DPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """DPTForDepthEstimation""",
        """DPTForSemanticSegmentation""",
        """DPTModel""",
        """DPTPreTrainedModel""",
    ]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 10 | 0 |
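The point of the _import_structure/_LazyModule pattern above is that heavy submodules load only on first attribute access. A quick sketch of what that looks like from user code, assuming the transformers package is installed with torch available:

# Both names resolve through _LazyModule's __getattr__; modeling_dpt (and
# therefore torch) is only imported when DPTForDepthEstimation is accessed.
from transformers import DPTConfig, DPTForDepthEstimation

config = DPTConfig()
model = DPTForDepthEstimation(config)  # randomly initialized model from the config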
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig


ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
    "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
    "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
    "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
    "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
    "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
    "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
    "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}


class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 362 |
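A brief sanity check of the configuration classes above. This is a sketch, assuming the file sits inside the transformers package so the relative imports resolve:

config = AlbertConfig()  # defaults mirror albert-xxlarge: 4096 hidden units, 64 heads
assert config.hidden_size == 4096 and config.num_attention_heads == 64

# The ONNX config exposes the three standard encoder inputs with dynamic axes.
onnx_config = AlbertOnnxConfig.from_model_config(config)
print(list(onnx_config.inputs))  # ['input_ids', 'attention_mask', 'token_type_ids']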
import warnings

from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor


logger = logging.get_logger(__name__)


class PerceiverFeatureExtractor(PerceiverImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 10 | 0 |
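One way to see the deprecation shim above in action, as a hypothetical check that is not part of the original file; it assumes the vision dependencies that PerceiverImageProcessor needs are installed:

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    PerceiverFeatureExtractor()  # should emit the FutureWarning from __init__
assert any(issubclass(w.category, FutureWarning) for w in caught)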
from graphs.minimum_spanning_tree_kruskal import kruskal


def test_kruskal_successful_result():
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    result = kruskal(num_nodes, edges)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    assert sorted(expected) == sorted(result)
| 363 |
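The test above pins down the contract of kruskal: take (num_nodes, edges-as-[u, v, weight]) and return the minimum-spanning-tree edge list. Below is a self-contained sketch of one implementation that satisfies that contract, using a greedy sort plus union-find; it is an illustrative rewrite, not the module under test:

def kruskal_sketch(num_nodes: int, edges: list) -> list:
    parent = list(range(num_nodes))

    def find(node: int) -> int:
        # Union-find lookup with path halving.
        while parent[node] != node:
            parent[node] = parent[parent[node]]
            node = parent[node]
        return node

    mst = []
    for u, v, weight in sorted(edges, key=lambda edge: edge[2]):
        root_u, root_v = find(u), find(v)
        if root_u != root_v:  # the edge joins two components: keep it
            parent[root_u] = root_v
            mst.append([u, v, weight])
    return mst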
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_pix2struct": [
        "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Pix2StructConfig",
        "Pix2StructTextConfig",
        "Pix2StructVisionConfig",
    ],
    "processing_pix2struct": ["Pix2StructProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pix2struct"] = [
        "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Pix2StructPreTrainedModel",
        "Pix2StructForConditionalGeneration",
        "Pix2StructVisionModel",
        "Pix2StructTextModel",
    ]


if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 10 | 0 |