| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (86 - 54.5k chars) | int64 (0 - 371) | string (87 - 49.2k chars) | int64 (0 - 349) | int64 (0 - 1) |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class MBartTokenizer(PreTrainedTokenizer):
    """Construct an MBART tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        tokenizer_file=None,
        src_lang=None,
        tgt_lang=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        additional_special_tokens=None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            tokenizer_file=tokenizer_file,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |  7  |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | --- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-' | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    @property
    def vocab_size(self):
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) into a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting. No prefix and suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target language setting. No prefix and suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
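
# Standalone sketch (not part of the original file) of the fairseq/SentencePiece
# alignment trick the tokenizer above relies on: the first four fairseq ids are
# hard-coded, every other SentencePiece id is shifted by a fixed offset, and a
# SentencePiece id of 0 (its <unk>) falls back to the fairseq <unk> id.
def spm_piece_to_fairseq_id(sp_model, token: str, fairseq_offset: int = 1, unk_token_id: int = 3) -> int:
    fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
    if token in fairseq_tokens_to_ids:
        return fairseq_tokens_to_ids[token]
    spm_id = sp_model.PieceToId(token)
    # sp_model returns 0 for unknown pieces; map that to the fairseq unk id
    return spm_id + fairseq_offset if spm_id else unk_token_id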
| code_codestyle: 178 |
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
logger = logging.get_logger(__name__)


class PerceiverFeatureExtractor(PerceiverImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| style_context_codestyle: 178 | label: 1 |
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping
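
# Quick demonstration (illustrative, not part of the original file) of what the
# loader above adds over plain yaml.SafeLoader:
#     yaml.load("a: 1\nb: 2", Loader=_NoDuplicateSafeLoader)  # -> {"a": 1, "b": 2}
#     yaml.load("a: 1\na: 2", Loader=_NoDuplicateSafeLoader)  # -> TypeError: Got duplicate yaml keys: ['a']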
def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)
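
# Worked example (hypothetical input) for the splitter above:
#     _split_yaml_from_readme("---\nlicense: mit\n---\n# My dataset")
# returns ("license: mit", "# My dataset"); a README without a leading "---"
# front-matter block returns (None, <full content>).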
class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        """Loads and validates the dataset metadata from its dataset card (README.md)."""
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()
    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None

        updated_readme_content = self._to_readme(readme_content)

        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content
    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}

        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
known_task_ids = {
'''image-classification''': [],
'''translation''': [],
'''image-segmentation''': [],
'''fill-mask''': [],
'''automatic-speech-recognition''': [],
'''token-classification''': [],
'''sentence-similarity''': [],
'''audio-classification''': [],
'''question-answering''': [],
'''summarization''': [],
'''zero-shot-classification''': [],
'''table-to-text''': [],
'''feature-extraction''': [],
'''other''': [],
'''multiple-choice''': [],
'''text-classification''': [],
'''text-to-image''': [],
'''text2text-generation''': [],
'''zero-shot-image-classification''': [],
'''tabular-classification''': [],
'''tabular-regression''': [],
'''image-to-image''': [],
'''tabular-to-text''': [],
'''unconditional-image-generation''': [],
'''text-retrieval''': [],
'''text-to-speech''': [],
'''object-detection''': [],
'''audio-to-audio''': [],
'''text-generation''': [],
'''conversational''': [],
'''table-question-answering''': [],
'''visual-question-answering''': [],
'''image-to-text''': [],
'''reinforcement-learning''': [],
'''voice-activity-detection''': [],
'''time-series-forecasting''': [],
'''document-question-answering''': [],
}
if __name__ == "__main__":
    from argparse import ArgumentParser

    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
| code_codestyle: 368 |
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    def __init__(self, short_edge_length, max_size=sys.maxsize):
        """
        Args:
            short_edge_length (list[min, max])
            max_size (int): maximum allowed longest edge length.
        """
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length
    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)

            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)

        return img_augs
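
# Pure-math sketch (assumption: it mirrors the arithmetic above, no torch/PIL)
# of how the target size is chosen: scale so the short edge equals `size`, then
# shrink again if the long edge would exceed `max_size`.
def shortest_edge_target(h: int, w: int, size: int, max_size: int):
    scale = size / min(h, w)
    newh, neww = (size, scale * w) if h < w else (scale * h, size)
    if max(newh, neww) > max_size:
        rescale = max_size / max(newh, neww)
        newh, neww = newh * rescale, neww * rescale
    return int(newh + 0.5), int(neww + 0.5)


# e.g. shortest_edge_target(480, 640, 800, 1333) == (800, 1067)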
class Preprocess:
    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std
    def pad(self, images):
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im,
                [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]],
                value=self.pad_value,
            )
            for size, im in zip(image_sizes, images)
        ]

        return torch.stack(images), torch.tensor(image_sizes)
    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _scale_box(boxes, scale_yx):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _clip_box(tensor, box_size: Tuple[int, int]):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
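
# Toy usage of the two helpers above (values are made up): scale boxes back to
# the raw image coordinates, then clip them to the raw image size.
#     boxes = torch.tensor([[10.0, 20.0, 50.0, 80.0]])  # x1, y1, x2, y2
#     scale_yx = torch.tensor([[2.0, 2.0]])             # per-image (scale_y, scale_x)
#     _scale_box(boxes, scale_yx)                       # -> [[20., 40., 100., 160.]]
#     _clip_box(boxes, (120, 90))                       # h=120, w=90: x2 clamps from 100 to 90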
| style_context_codestyle: 191 | label: 0 |
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)
def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)
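
# The dtype rule used above, factored out as a tiny standalone sketch: token ids
# fit in an unsigned 16-bit integer only when the vocabulary has fewer than
# 2**16 entries; otherwise a wider signed int is needed.
def pick_id_dtype(vocab_size: int):
    return np.uint16 if vocab_size < (1 << 16) else np.int32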
if __name__ == "__main__":
    main()
| code_codestyle: 18 |

from collections import defaultdict
def dfs(start: int) -> int:
    """Depth-first search that returns the size of the subtree rooted at `start`,
    recording every vertex whose subtree has an even number of nodes."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree():
    """Run the DFS from the root to populate `cuts`."""
    dfs(1)
if __name__ == "__main__":
    number_of_nodes, number_of_edges = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    print(len(cuts) - 1)
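
# Compact restatement (illustrative, not part of the original file) of why the
# answer is len(cuts) - 1: dfs() also appends the root when the whole tree has
# an even node count, but only non-root even subtrees mark removable edges.
def count_even_cuts(tree, root: int = 1) -> int:
    removable = 0

    def subtree_size(u, parent):
        nonlocal removable
        s = 1 + sum(subtree_size(v, u) for v in tree[u] if v != parent)
        if u != root and s % 2 == 0:
            removable += 1
        return s

    subtree_size(root, 0)
    return removable


# For the 10-node tree above, count_even_cuts(tree) returns 2, matching the output.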
| style_context_codestyle: 18 | label: 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
logger = logging.get_logger(__name__)

IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/imagegpt-small": "",
    "openai/imagegpt-medium": "",
    "openai/imagegpt-large": "",
}
class ImageGPTConfig(PretrainedConfig):
    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=512 + 1,  # add one for start of sentence (sos) token
        n_positions=32 * 32,
        n_embd=512,
        n_layer=24,
        n_head=8,
        n_inner=None,
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        tie_word_embeddings=False,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
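
# Quick usage sketch for the config above (values shown are just the defaults):
#     config = ImageGPTConfig()
#     config.vocab_size    # 513 = 512 color clusters + 1 start-of-sequence token
#     config.hidden_size   # 512, resolved to n_embd through attribute_map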
class ImageGPTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(
        self,
        preprocessor: "FeatureExtractionMixin",
        batch_size: int = 1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 32,
        image_height: int = 32,
    ) -> Mapping[str, Any]:
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))
        return inputs

| code_codestyle: 8 |
import re


def is_sri_lankan_phone_number(phone: str) -> bool:
    """Determine whether the given string is a valid Sri Lankan mobile phone number."""
    pattern = re.compile(r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$")
    return bool(re.search(pattern, phone))
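
# Illustrative checks (numbers are made up):
#     is_sri_lankan_phone_number("0094702343221")  # True
#     is_sri_lankan_phone_number("94721234567")    # True  ("94" prefix, "72" block)
#     is_sri_lankan_phone_number("0731234567")     # False ("73" is not an accepted block)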
if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))

| style_context_codestyle: 8 | label: 1 |
'''simple docstring'''
import unittest
from transformers import DebertaV2Tokenizer, DebertaV2TokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class DebertaV2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaV2Tokenizer
    rust_tokenizer_class = DebertaV2TokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def snake_case_( self ) -> int:
super().setUp()
# We have a SentencePiece fixture for testing
_SCREAMING_SNAKE_CASE = DebertaV2Tokenizer(A , unk_token="""<unk>""" )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case_( self , A ) -> Dict:
_SCREAMING_SNAKE_CASE = """this is a test"""
_SCREAMING_SNAKE_CASE = """this is a test"""
return input_text, output_text
def snake_case_( self ) -> Dict:
_SCREAMING_SNAKE_CASE = """<pad>"""
_SCREAMING_SNAKE_CASE = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A )
def snake_case_( self ) -> Any:
_SCREAMING_SNAKE_CASE = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """[PAD]""" )
self.assertEqual(len(A ) , 3_0001 )
def snake_case_( self ) -> Any:
self.assertEqual(self.get_tokenizer().vocab_size , 3_0000 )
def snake_case_( self ) -> Any:
# fmt: off
_SCREAMING_SNAKE_CASE = """ \tHeLLo!how \n Are yoU? """
_SCREAMING_SNAKE_CASE = ["""▁hello""", """!""", """how""", """▁are""", """▁you""", """?"""]
# fmt: on
_SCREAMING_SNAKE_CASE = DebertaV2Tokenizer(A , do_lower_case=A )
_SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(tokenizer.encode(A , add_special_tokens=A ) )
self.assertListEqual(A , A )
_SCREAMING_SNAKE_CASE = DebertaV2TokenizerFast(A , do_lower_case=A )
_SCREAMING_SNAKE_CASE = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A , add_special_tokens=A ) )
self.assertListEqual(A , A )
@unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
def snake_case_( self ) -> List[Any]:
pass
@unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
def snake_case_( self ) -> Optional[int]:
pass
def snake_case_( self ) -> Optional[Any]:
# fmt: off
_SCREAMING_SNAKE_CASE = """I was born in 92000, and this is falsé."""
_SCREAMING_SNAKE_CASE = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
_SCREAMING_SNAKE_CASE = DebertaV2Tokenizer(A , split_by_punct=A )
_SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(tokenizer.encode(A , add_special_tokens=A ) )
self.assertListEqual(A , A )
_SCREAMING_SNAKE_CASE = DebertaV2TokenizerFast(A , split_by_punct=A )
_SCREAMING_SNAKE_CASE = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A , add_special_tokens=A ) )
self.assertListEqual(A , A )
def snake_case_( self ) -> List[str]:
# fmt: off
_SCREAMING_SNAKE_CASE = """I was born in 92000, and this is falsé."""
_SCREAMING_SNAKE_CASE = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
_SCREAMING_SNAKE_CASE = DebertaV2Tokenizer(A , do_lower_case=A , split_by_punct=A )
_SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(tokenizer.encode(A , add_special_tokens=A ) )
self.assertListEqual(A , A )
_SCREAMING_SNAKE_CASE = DebertaV2TokenizerFast(A , do_lower_case=A , split_by_punct=A )
_SCREAMING_SNAKE_CASE = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A , add_special_tokens=A ) )
self.assertListEqual(A , A )
def snake_case_( self ) -> str:
# fmt: off
_SCREAMING_SNAKE_CASE = """I was born in 92000, and this is falsé."""
_SCREAMING_SNAKE_CASE = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ]
# fmt: on
_SCREAMING_SNAKE_CASE = DebertaV2Tokenizer(A , do_lower_case=A , split_by_punct=A )
_SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(tokenizer.encode(A , add_special_tokens=A ) )
self.assertListEqual(A , A )
_SCREAMING_SNAKE_CASE = DebertaV2TokenizerFast(A , do_lower_case=A , split_by_punct=A )
_SCREAMING_SNAKE_CASE = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A , add_special_tokens=A ) )
self.assertListEqual(A , A )
def snake_case_( self ) -> Union[str, Any]:
# fmt: off
_SCREAMING_SNAKE_CASE = """I was born in 92000, and this is falsé."""
_SCREAMING_SNAKE_CASE = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
_SCREAMING_SNAKE_CASE = DebertaV2Tokenizer(A , do_lower_case=A , split_by_punct=A )
_SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(tokenizer.encode(A , add_special_tokens=A ) )
self.assertListEqual(A , A )
_SCREAMING_SNAKE_CASE = DebertaV2TokenizerFast(A , do_lower_case=A , split_by_punct=A )
_SCREAMING_SNAKE_CASE = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A , add_special_tokens=A ) )
self.assertListEqual(A , A )
def snake_case_( self ) -> List[str]:
# fmt: off
_SCREAMING_SNAKE_CASE = """ \tHeLLo!how \n Are yoU? """
_SCREAMING_SNAKE_CASE = ["""▁""", """<unk>""", """e""", """<unk>""", """o""", """!""", """how""", """▁""", """<unk>""", """re""", """▁yo""", """<unk>""", """?"""]
# fmt: on
_SCREAMING_SNAKE_CASE = DebertaV2Tokenizer(A , do_lower_case=A , split_by_punct=A )
_SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(tokenizer.encode(A , add_special_tokens=A ) )
self.assertListEqual(A , A )
_SCREAMING_SNAKE_CASE = DebertaV2TokenizerFast(A , do_lower_case=A , split_by_punct=A )
_SCREAMING_SNAKE_CASE = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A , add_special_tokens=A ) )
self.assertListEqual(A , A )
def snake_case_( self ) -> Dict:
_SCREAMING_SNAKE_CASE = self.get_tokenizer()
_SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
_SCREAMING_SNAKE_CASE = """I was born in 92000, and this is falsé."""
_SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(tokenizer.encode(A , add_special_tokens=A ) )
_SCREAMING_SNAKE_CASE = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A , add_special_tokens=A ) )
self.assertListEqual(A , A )
_SCREAMING_SNAKE_CASE = tokenizer.encode(A , add_special_tokens=A )
_SCREAMING_SNAKE_CASE = rust_tokenizer.encode(A , add_special_tokens=A )
self.assertListEqual(A , A )
_SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
_SCREAMING_SNAKE_CASE = tokenizer.encode(A )
_SCREAMING_SNAKE_CASE = rust_tokenizer.encode(A )
self.assertListEqual(A , A )
def snake_case_( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = """This is a test"""
_SCREAMING_SNAKE_CASE = [13, 1, 4398, 25, 21, 1289]
_SCREAMING_SNAKE_CASE = ["""▁""", """T""", """his""", """▁is""", """▁a""", """▁test"""]
_SCREAMING_SNAKE_CASE = ["""▁""", """<unk>""", """his""", """▁is""", """▁a""", """▁test"""]
_SCREAMING_SNAKE_CASE = DebertaV2Tokenizer(A , keep_accents=A )
_SCREAMING_SNAKE_CASE = DebertaV2TokenizerFast(A , keep_accents=A )
_SCREAMING_SNAKE_CASE = tokenizer.encode(A , add_special_tokens=A )
self.assertListEqual(A , A )
_SCREAMING_SNAKE_CASE = tokenizer.tokenize(A )
self.assertListEqual(A , A )
_SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(A )
self.assertListEqual(A , A )
_SCREAMING_SNAKE_CASE = rust_tokenizer.encode(A , add_special_tokens=A )
self.assertListEqual(A , A )
_SCREAMING_SNAKE_CASE = rust_tokenizer.tokenize(A )
self.assertListEqual(A , A )
_SCREAMING_SNAKE_CASE = rust_tokenizer.convert_ids_to_tokens(A )
self.assertListEqual(A , A )
# fmt: off
_SCREAMING_SNAKE_CASE = """I was born in 92000, and this is falsé."""
_SCREAMING_SNAKE_CASE = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
_SCREAMING_SNAKE_CASE = ["""▁""", """I""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """.""", ]
_SCREAMING_SNAKE_CASE = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ]
# fmt: on
_SCREAMING_SNAKE_CASE = tokenizer.encode(A , add_special_tokens=A )
self.assertListEqual(A , A )
_SCREAMING_SNAKE_CASE = tokenizer.tokenize(A )
self.assertListEqual(A , A )
_SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(A )
self.assertListEqual(A , A )
_SCREAMING_SNAKE_CASE = rust_tokenizer.encode(A , add_special_tokens=A )
self.assertListEqual(A , A )
_SCREAMING_SNAKE_CASE = rust_tokenizer.tokenize(A )
self.assertListEqual(A , A )
_SCREAMING_SNAKE_CASE = rust_tokenizer.convert_ids_to_tokens(A )
self.assertListEqual(A , A )
def snake_case_( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = DebertaV2Tokenizer(A )
_SCREAMING_SNAKE_CASE = tokenizer.encode("""sequence builders""" )
_SCREAMING_SNAKE_CASE = tokenizer.encode("""multi-sequence build""" )
_SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(A )
_SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(A , A )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , A )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , A , )
@slow
def snake_case_( self ) -> List[str]:
# fmt: off
_SCREAMING_SNAKE_CASE = {"""input_ids""": [[1, 3_9867, 36, 1_9390, 486, 27, 3_5052, 8_1436, 18, 6_0685, 1225, 7, 3_5052, 8_1436, 18, 9367, 1_6899, 18, 1_5937, 53, 594, 773, 18, 1_6287, 3_0465, 36, 1_5937, 6, 4_1139, 38, 3_6979, 6_0763, 191, 6, 3_4132, 99, 6, 5_0538, 390, 4_3230, 6, 3_4132, 2779, 2_0850, 14, 699, 1072, 1194, 36, 382, 1_0901, 53, 7, 699, 1072, 2084, 36, 2_0422, 630, 53, 19, 105, 3049, 1896, 1053, 1_6899, 1506, 11, 3_7978, 4243, 7, 1237, 3_1869, 200, 1_6566, 654, 6, 3_5052, 8_1436, 7, 5_5630, 1_3593, 4, 2], [1, 26, 1_5011, 13, 667, 8, 1053, 18, 2_3611, 1237, 7_2356, 1_2820, 34, 10_4134, 1209, 35, 1_3313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 1_5785, 1_4951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A , model_name="""microsoft/deberta-v2-xlarge""" , revision="""ad6e42c1532ddf3a15c39246b63f5559d558b670""" , )
| code_codestyle: 58 |
'''simple docstring'''
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"

BertAbsConfig = namedtuple(
    "BertAbsConfig",
    [
        "temp_dir",
        "large",
        "use_bert_emb",
        "finetune_bert",
        "encoder",
        "share_emb",
        "max_pos",
        "enc_layers",
        "enc_hidden_size",
        "enc_heads",
        "enc_ff_size",
        "enc_dropout",
        "dec_layers",
        "dec_hidden_size",
        "dec_heads",
        "dec_ff_size",
        "dec_dropout",
    ],
)
def convert_bertabs_checkpoints(path_to_checkpoints, dump_path):
    # Instantiate the authors' model with the pre-trained weights
    # (the boolean flag values below are restored from the original conversion script)
    config = BertAbsConfig(
        temp_dir=".",
        finetune_bert=False,
        large=False,
        share_emb=True,
        use_bert_emb=False,
        encoder="bert",
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
    )
    checkpoints = torch.load(path_to_checkpoints, lambda storage, loc: storage)
    original = AbsSummarizer(config, torch.device("cpu"), checkpoints)
    original.eval()

    new_model = BertAbsSummarizer(config, torch.device("cpu"))
    new_model.eval()

    # -------------------
    # Convert the weights
    # -------------------
    logging.info("convert the model")
    new_model.bert.load_state_dict(original.bert.state_dict())
    new_model.decoder.load_state_dict(original.decoder.state_dict())
    new_model.generator.load_state_dict(original.generator.state_dict())
    # ----------------------------------
    # Make sure the outputs are identical
    # ----------------------------------
    logging.info("Make sure that the models' outputs are identical")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

    # prepare the model inputs
    encoder_input_ids = tokenizer.encode("This is sample éàalj'-.")
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids)))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-.")
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids)))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)
    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0

    # forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None
    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical.
    output_original_model = original(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls)[0]
    output_original_generator = original.generator(output_original_model)

    output_converted_model = new_model(
        encoder_input_ids, decoder_input_ids, token_type_ids, encoder_attention_mask, decoder_attention_mask
    )[0]
    output_converted_generator = new_model.generator(output_converted_model)

    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("Maximum absolute difference between weights: {:.2f}".format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("Maximum absolute difference between weights: {:.2f}".format(maximum_absolute_difference))

    are_identical = torch.allclose(output_converted_model, output_original_model, atol=1e-3)
    if are_identical:
        logging.info("all weights are equal up to 1e-3")
    else:
        raise ValueError("the weights are different. The new model is likely different from the original one.")

    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info("saving the model's state dictionary")
    torch.save(
        new_model.state_dict(), "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin"
    )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--bertabs_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch dump.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the output PyTorch model.",
    )
    args = parser.parse_args()
    convert_bertabs_checkpoints(
        args.bertabs_checkpoint_path,
        args.pytorch_dump_folder_path,
    )
| style_context_codestyle: 58 | label: 1 |
import math


def solution(n: int = 100) -> int:
    """Returns the difference between the square of the sum of the first n
    natural numbers and the sum of their squares."""
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares
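
# Worked check: for n = 10 the square of the sum is 55**2 = 3025 and the sum of
# the squares is 385, so solution(10) returns 3025 - 385 = 2640.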
if __name__ == "__main__":
    print(f"{solution() = }")
| code_codestyle: 319 |
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)
def validate_grid(grid: list[list[int]]) -> None:
    """Validate that the rows and columns of the grid are sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))
def find_negative_index(array: list[int]) -> int:
    """Find the smallest negative index from a sorted array."""
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)
def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """Count negatives using the sorted property: the bound found in each row
    also caps the search range in the next row."""
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total
def count_negatives_brute_force(grid: list[list[int]]) -> int:
    """O(n*m) scan over every cell."""
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    """Like the brute force above, but stops at the first negative number in each row."""
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total
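
# Worked example (first test grid above): the negatives per row are 1, 1, 2 and 4,
# so every counting strategy returns 8 for
# [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]].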
def benchmark() -> None:
    """Benchmark the three counting functions against each other."""
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
| style_context_codestyle: 319 | label: 1 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_trajectory_transformer": [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TrajectoryTransformerConfig",
    ],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrajectoryTransformerModel",
        "TrajectoryTransformerPreTrainedModel",
        "load_tf_weights_in_trajectory_transformer",
    ]
if TYPE_CHECKING:
    from .configuration_trajectory_transformer import (
        TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TrajectoryTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trajectory_transformer import (
            TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TrajectoryTransformerModel,
            TrajectoryTransformerPreTrainedModel,
            load_tf_weights_in_trajectory_transformer,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
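
# Minimal standalone illustration (hypothetical, not transformers' actual
# implementation) of the lazy-module idea used above: attribute access triggers
# the real import and caches the result, so importing the package stays cheap.
import importlib
import types


class TinyLazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._name_to_submodule = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        submodule = self._name_to_submodule[attr]
        value = getattr(importlib.import_module("." + submodule, self.__name__), attr)
        setattr(self, attr, value)  # cache so the next lookup skips __getattr__
        return value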
| code_codestyle: 285 |
import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Element-wise logistic sigmoid: 1 / (1 + e^-x)."""
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """SiLU / swish activation: x * sigmoid(x)."""
    return vector * sigmoid(vector)
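
# Sanity values for the two activations above:
#     sigmoid(np.array([0.0]))              # -> array([0.5])
#     sigmoid_linear_unit(np.array([0.0]))  # -> array([0.])
# For large positive x, the SiLU output approaches x itself.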
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| style_context_codestyle: 285 | label: 1 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> List[Any]:
# A mock response for an HTTP head request to emulate server down
_a : Dict = mock.Mock()
_a : List[Any] = 5_0_0
_a : Optional[Any] = {}
_a : Any = HTTPError
_a : Union[str, Any] = {}
# Download this model to make sure it's in the cache.
_a : Union[str, Any] = Wav2Vec2FeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''' , return_value=_a ) as mock_head:
_a : Union[str, Any] = Wav2Vec2FeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''' )
# This check we did call the fake head request
mock_head.assert_called()
def __lowercase ( self ) -> Any:
# This test is for deprecated behavior and can be removed in v5
_a : List[str] = Wav2Vec2FeatureExtractor.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json''' )
@is_staging_test
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def __lowercase ( cls ) -> Tuple:
_a : Optional[int] = TOKEN
HfFolder.save_token(_a )
@classmethod
def __lowercase ( cls ) -> int:
try:
delete_repo(token=cls._token , repo_id='''test-feature-extractor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-feature-extractor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-feature-extractor''' )
except HTTPError:
pass
def __lowercase ( self ) -> str:
_a : int = Wav2Vec2FeatureExtractor.from_pretrained(_a )
feature_extractor.push_to_hub('''test-feature-extractor''' , use_auth_token=self._token )
_a : List[Any] = WavaVecaFeatureExtractor.from_pretrained(F"""{USER}/test-feature-extractor""" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_a , getattr(_a , _a ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-feature-extractor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
_a , repo_id='''test-feature-extractor''' , push_to_hub=_a , use_auth_token=self._token )
_a : Dict = Wav2Vec2FeatureExtractor.from_pretrained(F"""{USER}/test-feature-extractor""" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_a , getattr(_a , _a ) )
def __lowercase ( self ) -> List[Any]:
_a : int = Wav2Vec2FeatureExtractor.from_pretrained(_a )
feature_extractor.push_to_hub('''valid_org/test-feature-extractor''' , use_auth_token=self._token )
_a : List[str] = Wav2Vec2FeatureExtractor.from_pretrained('''valid_org/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_a , getattr(_a , _a ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-feature-extractor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
_a , repo_id='''valid_org/test-feature-extractor-org''' , push_to_hub=_a , use_auth_token=self._token )
_a : str = Wav2Vec2FeatureExtractor.from_pretrained('''valid_org/test-feature-extractor-org''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_a , getattr(_a , _a ) )
def __lowercase ( self ) -> Union[str, Any]:
CustomFeatureExtractor.register_for_auto_class()
_a : Union[str, Any] = CustomFeatureExtractor.from_pretrained(_a )
feature_extractor.push_to_hub('''test-dynamic-feature-extractor''' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor'''} , )
_a : str = AutoFeatureExtractor.from_pretrained(
F"""{USER}/test-dynamic-feature-extractor""" , trust_remote_code=_a )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , '''CustomFeatureExtractor''' )
| code_codestyle: 357 |
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version):
    """Update the version in all examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version() -> packaging.version.Version:
    """Reads the current version in the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch: bool = False) -> None:
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = F"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
    else:
        default_version = F"""{default_version.major}.{default_version.minor + 1}.0"""
    # Now let's ask nicely if that's the right one.
    version = input(F"""Which version are you releasing? [{default_version}]""" )
    if len(version ) == 0:
        version = default_version
    print(F"""Updating version to {version}.""" )
    global_version_update(version , patch=patch )
    if not patch:
        print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
        clean_main_ref_in_model_list()
def post_release_work() -> None:
    """Do all the necessary post-release steps."""
    # First let's get the current version
    current_version = get_version()
    dev_version = F"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(F"""Which version are we developing now? [{dev_version}]""" )
    if len(version ) == 0:
        version = dev_version
    print(F"""Updating version to {version}.""" )
    global_version_update(version )
    print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
| 15 | 0 |
'''simple docstring'''
import numpy as np
import qiskit
def bbaa(key_len: int = 8, seed: int | None = None) -> str:
    """Simulate the BB84 quantum key distribution protocol and return the shared key."""
    rng = np.random.default_rng(seed=seed )
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2 , size=num_qubits )
    # The set of states Alice will prepare.
    alice_state = rng.integers(2 , size=num_qubits )
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2 , size=num_qubits )
    # Quantum Circuit to simulate BB84
    bbaa_circ = qiskit.QuantumCircuit(num_qubits , name="""BB84""" )
    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis ):
        if alice_state[index] == 1:
            bbaa_circ.x(index )
        if alice_basis[index] == 1:
            bbaa_circ.h(index )
    bbaa_circ.barrier()
    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis ):
        if bob_basis[index] == 1:
            bbaa_circ.h(index )
    bbaa_circ.barrier()
    bbaa_circ.measure_all()
    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("""aer_simulator""" )
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bbaa_circ , sim , shots=1 , seed_simulator=seed )
    # Returns the result of measurement.
    result = job.result().get_counts(bbaa_circ ).most_frequent()
    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(
                alice_basis , bob_basis , result )
            if alice_basis_bit == bob_basis_bit
        ] )
    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key ) >= key_len else gen_key.ljust(key_len , """0""" )
    return key
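# Usage sketch (my addition, assuming qiskit with the Aer provider is installed):
# a fixed seed makes the sampled bases/states and the simulator outcome - and
# therefore the key - reproducible, and the key always has the requested length.
def _demo_reproducible_key() -> None:
    assert bbaa(8, seed=42) == bbaa(8, seed=42)
    assert len(bbaa(16, seed=7)) == 16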
if __name__ == "__main__":
print(F"""The generated key is : {bbaa(8, seed=0)}""")
from doctest import testmod
testmod()
| 37 | from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class __snake_case ( DiffusionPipeline ):
    def __init__( self , segmentation_model : CLIPSegForImageSegmentation , segmentation_processor : CLIPSegProcessor , vae : AutoencoderKL , text_encoder : CLIPTextModel , tokenizer : CLIPTokenizer , unet : UNet2DConditionModel , scheduler : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , safety_checker : StableDiffusionSafetyChecker , feature_extractor : CLIPImageProcessor , ):
        """Pipeline that segments the region described by `text` with CLIPSeg, then inpaints it with Stable Diffusion."""
        super().__init__()
        if hasattr(scheduler.config , """steps_offset""" ) and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"""
                f""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """
                """to update the config accordingly as leaving `steps_offset` might led to incorrect results"""
                """ in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"""
                """ it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"""
                """ file"""
            )
            deprecate("""steps_offset!=1""" , """1.0.0""" , deprecation_message , standard_warn=False )
            new_config = dict(scheduler.config )
            new_config["""steps_offset"""] = 1
            scheduler._internal_dict = FrozenDict(new_config )
        if hasattr(scheduler.config , """skip_prk_steps""" ) and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f"""The configuration file of this scheduler: {scheduler} has not set the configuration"""
                """ `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"""
                """ sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"""
                """ incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"""
                """ Hub, it would be very nice if you could open a Pull request for the"""
                """ `scheduler/scheduler_config.json` file"""
            )
            deprecate("""skip_prk_steps not set""" , """1.0.0""" , deprecation_message , standard_warn=False )
            new_config = dict(scheduler.config )
            new_config["""skip_prk_steps"""] = True
            scheduler._internal_dict = FrozenDict(new_config )
        if safety_checker is None:
            logger.warning(
                f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
                """ that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"""
                """ results in services or applications open to the public. Both the diffusers team and Hugging Face"""
                """ strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"""
                """ it only for use-cases that involve analyzing network behavior or auditing its results. For more"""
                """ information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" )
        self.register_modules(
            segmentation_model=segmentation_model , segmentation_processor=segmentation_processor , vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , safety_checker=safety_checker , feature_extractor=feature_extractor , )
    def enable_attention_slicing( self , slice_size : Optional[Union[str, int]] = "auto" ):
        """Enable sliced attention computation to save memory."""
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size )

    def disable_attention_slicing( self ):
        """Disable sliced attention computation."""
        self.enable_attention_slicing(None )
    def enable_sequential_cpu_offload( self ):
        """Offload all models to CPU using accelerate, significantly reducing memory usage."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("""Please install accelerate via `pip install accelerate`""" )
        device = torch.device("""cuda""" )
        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device )
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device( self ):
        """Return the device the pipeline's models execute on, accounting for accelerate hooks."""
        if self.device != torch.device("""meta""" ) or not hasattr(self.unet , """_hf_hook""" ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module , """_hf_hook""" )
                and hasattr(module._hf_hook , """execution_device""" )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device
    @torch.no_grad()
    def __call__( self , prompt : Union[str, List[str]] , image : Union[torch.FloatTensor, PIL.Image.Image] , text : str , height : int = 5_12 , width : int = 5_12 , num_inference_steps : int = 50 , guidance_scale : float = 7.5 , negative_prompt : Optional[Union[str, List[str]]] = None , num_images_per_prompt : Optional[int] = 1 , eta : float = 0.0 , generator : Optional[torch.Generator] = None , latents : Optional[torch.FloatTensor] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps : int = 1 , **kwargs , ):
        """Segment the region described by `text` with CLIPSeg, then inpaint it with `prompt`."""
        # Use CLIPSeg to compute a mask for the region described by `text`.
        inputs = self.segmentation_processor(
            text=[text] , images=[image] , padding="""max_length""" , return_tensors="""pt""" ).to(self.device )
        outputs = self.segmentation_model(**inputs )
        mask = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
        mask_pil = self.numpy_to_pil(mask )[0].resize(image.size )
        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
        return inpainting_pipeline(
            prompt=prompt , image=image , mask_image=mask_pil , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , )
| 219 | 0 |
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
[
transforms.Resize((2_56, 2_56)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def preprocess(image):
    """Convert a PIL image (or list of PIL images) into a normalized tensor batch."""
    if isinstance(image , torch.Tensor ):
        return image
    elif isinstance(image , PIL.Image.Image ):
        image = [image]
    image = [trans(img.convert('RGB' ) ) for img in image]
    image = torch.stack(image )
    return image
class UpperCamelCase__( DiffusionPipeline ):
    def __init__( self , unet , scheduler ) -> Any:
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config )
        self.register_modules(unet=unet , scheduler=scheduler )

    def check_inputs( self , strength ) -> None:
        if strength < 0 or strength > 1:
            raise ValueError(f'''The value of strength should in [0.0, 1.0] but is {strength}''' )

    def get_timesteps( self , num_inference_steps , strength , device ) -> Tuple:
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength ) , num_inference_steps )
        t_start = max(num_inference_steps - init_timestep , 0 )
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start

    def prepare_latents( self , image , timestep , batch_size , dtype , device , generator=None ):
        if not isinstance(image , (torch.Tensor, PIL.Image.Image, list) ):
            raise ValueError(
                f'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image )}''' )
        image = image.to(device=device , dtype=dtype )
        if isinstance(generator , list ) and len(generator ) != batch_size:
            raise ValueError(
                f'''You have passed a list of generators of length {len(generator )}, but requested an effective batch'''
                f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
        init_latents = image
        shape = init_latents.shape
        noise = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        # get latents
        print('add noise to latents at timestep' , timestep )
        init_latents = self.scheduler.add_noise(init_latents , noise , timestep )
        latents = init_latents
        return latents

    @torch.no_grad()
    def __call__( self , image = None , strength = 0.8 , batch_size = 1 , generator = None , eta = 0.0 , num_inference_steps = 50 , use_clipped_model_output = None , output_type = "pil" , return_dict = True , ) -> Union[ImagePipelineOutput, Tuple]:
        # 1. Check inputs. Raise error if strength is not valid
        self.check_inputs(strength )
        # 2. Preprocess image
        image = preprocess(image )
        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps , device=self.device )
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps , strength , self.device )
        latent_timestep = timesteps[:1].repeat(batch_size )
        # 4. Prepare latent variables
        latents = self.prepare_latents(image , latent_timestep , batch_size , self.unet.dtype , self.device , generator )
        image = latents
        # 5. Denoising loop
        for t in self.progress_bar(timesteps ):
            # 1. predict noise model_output
            model_output = self.unet(image , t ).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output , t , image , eta=eta , use_clipped_model_output=use_clipped_model_output , generator=generator , ).prev_sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image, latent_timestep.item())
        return ImagePipelineOutput(images=image )
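# Sketch (my addition, mirroring get_timesteps above): `strength` sets how much
# of the DDIM schedule actually runs - strength 1.0 denoises from (almost) pure
# noise, strength 0.0 skips the denoising loop entirely.
def _timestep_fraction(num_inference_steps: int, strength: float) -> float:
    init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
    t_start = max(num_inference_steps - init_timestep, 0)
    return (num_inference_steps - t_start) / num_inference_steps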
| 365 | """simple docstring"""
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple:
    """Compute the resonant frequency of an LC circuit: f = 1 / (2 * pi * sqrt(L * C))."""
if inductance <= 0:
raise ValueError('Inductance cannot be 0 or negative' )
elif capacitance <= 0:
raise ValueError('Capacitance cannot be 0 or negative' )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
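def _check_resonant_frequency() -> None:
    # Worked check (my addition): with L = C = 1 the formula reduces to 1 / (2 * pi).
    assert abs(resonant_frequency(1, 1)[1] - 1 / (2 * pi)) < 1e-12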
if __name__ == "__main__":
import doctest
doctest.testmod()
| 154 | 0 |
"""simple docstring"""
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar("T")
class LRUCache(Generic[T]):
    """Page replacement cache following the least-recently-used policy."""

    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        """Create an empty store with capacity `n` (unbounded if `n` is falsy)."""
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("""n should be an integer greater than 0.""")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        """Access a key: evict the least recently used entry if full, then move `x` to the front."""
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        """Print the cache contents, most recently used first."""
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return F'''LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}'''
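# Behavior sketch (my addition): eviction pops the least recently used key from
# the right end of the deque; re-referring a key moves it back to the front.
def _demo_lru_eviction() -> None:
    cache: LRUCache[int] = LRUCache(2)
    cache.refer(1)
    cache.refer(2)
    cache.refer(3)  # evicts 1
    assert str(cache) == "LRUCache(2) => [3, 2]"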
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache: LRUCache[str | int] = LRUCache(4)
lru_cache.refer("A")
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer("A")
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 136 |
"""simple docstring"""
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    """Multiplication only for 2x2 matrices (the recursion base case)."""
    if len(a ) != 2 or len(a[0] ) != 2 or len(b ) != 2 or len(b[0] ) != 2:
        raise Exception("""Matrices are not 2x2""" )
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix
def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
        for row in range(len(matrix_a ) )
    ]
def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
        for row in range(len(matrix_a ) )
    ]
def split_matrix(a: list) -> tuple[list, list, list, list]:
    """Split an even-length square matrix into its four quadrants."""
    if len(a ) % 2 != 0 or len(a[0] ) % 2 != 0:
        raise Exception("""Odd matrices are not supported!""" )
    matrix_length = len(a )
    mid = matrix_length // 2
    top_right = [[a[i][j] for j in range(mid , matrix_length )] for i in range(mid )]
    bot_right = [
        [a[i][j] for j in range(mid , matrix_length )] for i in range(mid , matrix_length )
    ]
    top_left = [[a[i][j] for j in range(mid )] for i in range(mid )]
    bot_left = [[a[i][j] for j in range(mid )] for i in range(mid , matrix_length )]
    return top_left, top_right, bot_left, bot_right
def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix ), len(matrix[0] )
def print_matrix(matrix: list) -> None:
    print("""\n""".join(str(line ) for line in matrix ) )
def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    """Recursive Strassen multiplication on (padded) power-of-two square matrices."""
    if matrix_dimensions(matrix_a ) == (2, 2):
        return default_matrix_multiplication(matrix_a , matrix_b )
    a , b , c , d = split_matrix(matrix_a )
    e , f , g , h = split_matrix(matrix_b )
    t1 = actual_strassen(a , matrix_subtraction(f , h ) )
    t2 = actual_strassen(matrix_addition(a , b ) , h )
    t3 = actual_strassen(matrix_addition(c , d ) , e )
    t4 = actual_strassen(d , matrix_subtraction(g , e ) )
    t5 = actual_strassen(matrix_addition(a , d ) , matrix_addition(e , h ) )
    t6 = actual_strassen(matrix_subtraction(b , d ) , matrix_addition(g , h ) )
    t7 = actual_strassen(matrix_subtraction(a , c ) , matrix_addition(e , f ) )
    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5 , t4 ) , t2 ) , t6 )
    top_right = matrix_addition(t1 , t2 )
    bot_left = matrix_addition(t3 , t4 )
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1 , t5 ) , t3 ) , t7 )
    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right ) ):
        new_matrix.append(top_left[i] + top_right[i] )
    for i in range(len(top_right ) ):
        new_matrix.append(bot_left[i] + bot_right[i] )
    return new_matrix
def strassen(matrixa: list, matrixb: list) -> list:
    """Strassen multiplication after zero-padding both matrices to a power-of-two size."""
    if matrix_dimensions(matrixa )[1] != matrix_dimensions(matrixb )[0]:
        msg = (
            """Unable to multiply these matrices, please check the dimensions.\n"""
            F'''Matrix A: {matrixa}\n'''
            F'''Matrix B: {matrixb}'''
        )
        raise Exception(msg )
    dimensiona = matrix_dimensions(matrixa )
    dimensionb = matrix_dimensions(matrixb )
    if dimensiona[0] == dimensiona[1] and dimensionb[0] == dimensionb[1]:
        return [matrixa, matrixb]
    maximum = max(*dimensiona , *dimensionb )
    maxim = int(math.pow(2 , math.ceil(math.log2(maximum ) ) ) )
    new_matrixa = matrixa
    new_matrixb = matrixb
    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0 , maxim ):
        if i < dimensiona[0]:
            for _ in range(dimensiona[1] , maxim ):
                new_matrixa[i].append(0 )
        else:
            new_matrixa.append([0] * maxim )
        if i < dimensionb[0]:
            for _ in range(dimensionb[1] , maxim ):
                new_matrixb[i].append(0 )
        else:
            new_matrixb.append([0] * maxim )
    final_matrix = actual_strassen(new_matrixa , new_matrixb )
    # Removing the additional zeros
    for i in range(0 , maxim ):
        if i < dimensiona[0]:
            for _ in range(dimensionb[1] , maxim ):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
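def _check_strassen_against_naive() -> None:
    # Sanity check (my addition): Strassen agrees with the hand-computed product
    # on a small non-square example (note that `strassen` pads and therefore
    # mutates its inputs in place).
    a = [[1, 2], [3, 4], [5, 6], [7, 8]]
    b = [[1, 0, 1], [0, 1, 1]]
    assert strassen(a, b) == [[1, 2, 3], [3, 4, 7], [5, 6, 11], [7, 8, 15]]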
if __name__ == "__main__":
    matrixa = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrixb = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrixa, matrixb))
| 136 | 1 |
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class _a ( unittest.TestCase):
"""simple docstring"""
@slow
    def test_small_integration_test( self ) -> None:
        model = FlaxMT5ForConditionalGeneration.from_pretrained('''google/mt5-small''' )
        tokenizer = AutoTokenizer.from_pretrained('''google/mt5-small''' )
        input_ids = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids
        labels = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids
        decoder_input_ids = shift_tokens_right(labels , model.config.pad_token_id , model.config.decoder_start_token_id )
        logits = model(input_ids , decoder_input_ids=decoder_input_ids ).logits
        loss = optax.softmax_cross_entropy(logits , onehot(labels , logits.shape[-1] ) ).mean()
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 361 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A : Optional[int] = {"configuration_mmbt": ["MMBTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : int = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
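# Sketch (my addition, illustrative only): _LazyModule resolves an exported name
# from _import_structure on first attribute access, roughly like this PEP
# 562-style module __getattr__.
def _lazy_getattr_sketch(name: str):
    import importlib
    for module_name, exports in _import_structure.items():
        if name in exports:
            return getattr(importlib.import_module("." + module_name, __package__), name)
    raise AttributeError(name)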
| 326 | 0 |
"""simple docstring"""
def sum_of_divisors(input_num: int) -> int:
    """Return the sum of the proper divisors of `input_num`."""
    if not isinstance(input_num , int ):
        raise ValueError("Input must be an integer" )
    if input_num <= 0:
        raise ValueError("Input must be positive" )
    return sum(
        divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
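def _check_sum_of_divisors() -> None:
    # Worked examples (my addition): proper divisors of 6 are 1, 2, 3 and proper
    # divisors of 12 are 1, 2, 3, 4, 6.
    assert sum_of_divisors(6) == 6
    assert sum_of_divisors(12) == 16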
if __name__ == "__main__":
import doctest
doctest.testmod()
| 57 |
"""simple docstring"""
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ['''\nclass''', '''\ndef''', '''\n#''', '''\n@''', '''\nprint''', '''\nif''']
class TokenizedDataset(IterableDataset):
    """Tokenize and preprocess the dataset, yielding each prompt `n_copies` times."""

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors='pt')
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class EndOfFunctionCriteria(StoppingCriteria):
    """Custom `StoppingCriteria` that checks whether all generations in the batch are complete."""

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        """Return True once every generated sequence contains one of the end-of-function strings."""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(string):
    """Drop the final stop token and everything generated after it."""
    string_list = re.split('(%s)' % '|'.join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate multiple completions per task, using a dataloader with several copies of each prompt."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch['ids'].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch['ids'][:, : batch['input_len']], num_return_sequences=batch_size, **gen_kwargs)
            # each task is generated batch_size times
            generated_tasks = batch['task_id'].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id)
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()
            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)
    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()
    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = 'false'
    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()
    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)
    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
    # Generation settings
    gen_kwargs = {
        'do_sample': args.do_sample,
        'temperature': args.temperature,
        'max_new_tokens': args.max_new_tokens,
        'top_p': args.top_p,
        'top_k': args.top_k,
        'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }
    # Load evaluation dataset and metric
    human_eval = load_dataset('openai_humaneval')
    code_eval_metric = load_metric('code_eval')
    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval['test'])
    n_copies = args.n_samples // args.batch_size
    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval['test'], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)
    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[''], predictions=[['']])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            ' flag to enable code evaluation.')
        raise exception
    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)
    code_gens = complete_code(
        accelerator, model, tokenizer, human_eval_loader, n_tasks=n_tasks, batch_size=args.batch_size, **gen_kwargs, )
    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks)):
            test_func = human_eval['test'][task]['test']
            entry_point = F'''check({human_eval["test"][task]["entry_point"]})'''
            references.append('\n' + test_func + '\n' + entry_point)
        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=code_gens, num_workers=args.num_workers)
        print(F'''Results: {pass_at_k}''')
        # Save results to json file
        with open(args.output_file, 'w') as fp:
            json.dump(pass_at_k, fp)
# For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 183 | 0 |
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class lowercase_ ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ) ->None:
        super().setUp()
        tokenizer = BarthezTokenizerFast.from_pretrained('''moussaKam/mbarthez''' )
        tokenizer.save_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname , legacy_format=False )
        self.tokenizer = tokenizer
    def test_convert_token_and_id( self ) ->None:
        token = '''<pad>'''
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ) ->None:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '''<s>''' )
        self.assertEqual(vocab_keys[1] , '''<pad>''' )
        self.assertEqual(vocab_keys[-1] , '''<mask>''' )
        self.assertEqual(len(vocab_keys ) , 101_122 )
    def test_vocab_size( self ) ->None:
        self.assertEqual(self.get_tokenizer().vocab_size , 101_122 )
    @require_torch
    def test_prepare_batch( self ) ->None:
        src_text = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        expected_src_tokens = [0, 57, 3_018, 70_307, 91, 2]
        batch = self.tokenizer(
            src_text , max_length=len(expected_src_tokens ) , padding=True , truncation=True , return_tensors='''pt''' )
        self.assertIsInstance(batch , BatchEncoding )
        self.assertEqual((2, 6) , batch.input_ids.shape )
        self.assertEqual((2, 6) , batch.attention_mask.shape )
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens , result )
    def test_rust_and_python_full_tokenizers( self ) ->None:
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = '''I was born in 92000, and this is falsé.'''
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
@slow
    def test_tokenizer_integration( self ) ->None:
        # fmt: off
        expected_encoding = {'''input_ids''': [[0, 490, 14_328, 4_507, 354, 47, 43_669, 95, 25, 78_117, 20_215, 19_779, 190, 22, 400, 4, 35_343, 80_310, 603, 86, 24_937, 105, 33_438, 94_762, 196, 39_642, 7, 15, 15_933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10_534, 87, 25, 66, 3_358, 196, 55_289, 8, 82_961, 81, 2_204, 75_203, 7, 15, 763, 12_956, 216, 178, 14_328, 9_595, 1_377, 69_693, 7, 448, 71_021, 196, 18_106, 1_437, 13_974, 108, 9_083, 4, 49_315, 7, 39, 86, 1_326, 2_793, 46_333, 4, 448, 196, 74_588, 7, 49_315, 7, 39, 21, 822, 38_470, 74, 21, 66_723, 62_480, 8, 22_050, 5, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on
        # moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
            '''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
            '''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
            '''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
            '''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
            '''telles que la traduction et la synthèse de texte.''',
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name='''moussaKam/mbarthez''' , revision='''c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6''' , sequences=sequences , )
| 26 |
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
UpperCAmelCase__ = "bart"
UpperCAmelCase__ = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' )
        qar_model = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' )
        qar_model = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' )
        sas_model = AutoModelForSeq2SeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' )
        save_dict = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' )
        sas_model.load_state_dict(save_dict['''model'''] )
        sas_model = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_s2s_model(
            model_name='''t5-small''' , from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' , device='''cuda:0''' )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path='''wiki_snippets''' , name='''wiki40b_en_100_0''' )['''train''']
        wiki40b_passage_reps = np.memmap(
            '''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' , dtype='''float32''' , mode='''r''' , shape=(wiki40b_passages.num_rows, 128) , )
        wiki40b_index_flat = faiss.IndexFlatIP(128 )
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res , 1 , wiki40b_index_flat )
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps )  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] )
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset('''eli5''' , name='''LFQA_reddit''' )
    eli5_train = eli5['''train_eli5''']
    eli5_train_q_reps = np.memmap(
        '''eli5_questions_reps.dat''' , dtype='''float32''' , mode='''r''' , shape=(eli5_train.num_rows, 128) )
    eli5_train_q_index = faiss.IndexFlatIP(128 )
    eli5_train_q_index.add(eli5_train_q_reps )
    return (eli5_train, eli5_train_q_index)
passages, gpu_dense_index, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question] , qar_tokenizer , qar_model )
    D, I = eli5_train_q_index.search(q_rep , n_results )
    nn_examples = [eli5_train[int(i )] for i in I[0]]
    return nn_examples
def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question , qar_model , qar_tokenizer , passages , gpu_dense_index , n_results )
        else:
            support_doc, hit_lst = query_es_index(
                question , es_client , index_name='''english_wiki40b_snippets_100w''' , n_results=n_results , )
    support_list = [
        (res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
    ]
    question_doc = '''question: {} context: {}'''.format(question , support_doc )
    return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda a : None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda a : None),
    } )
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc , sas_model , sas_tokenizer , num_answers=1 , num_beams=n_beams , min_len=min_len , max_len=max_len , do_sample=sampling , temp=temp , top_p=top_p , top_k=None , max_input_length=1_024 , device='''cuda:0''' , )[0]
    # `support_list` is the module-level global set in the Streamlit flow below.
    return (answer, support_list)
st.title("Long Form Question Answering with ELI5")
# Start sidebar
UpperCAmelCase__ = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
UpperCAmelCase__ = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
UpperCAmelCase__ = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
"Answer the question",
"View the retrieved document only",
"View the most similar ELI5 question and answer",
"Show me everything, please!",
]
UpperCAmelCase__ = st.sidebar.checkbox("Demo options")
if demo_options:
UpperCAmelCase__ = st.sidebar.selectbox(
"",
action_list,
index=3,
)
UpperCAmelCase__ = action_list.index(action_st)
UpperCAmelCase__ = st.sidebar.selectbox(
"",
["Show full text of passages", "Show passage section titles"],
index=0,
)
UpperCAmelCase__ = show_type == "Show full text of passages"
else:
UpperCAmelCase__ = 3
UpperCAmelCase__ = True
UpperCAmelCase__ = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
UpperCAmelCase__ = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n "
st.sidebar.markdown(retriever_info)
UpperCAmelCase__ = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
UpperCAmelCase__ = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
UpperCAmelCase__ = "wiki40b"
UpperCAmelCase__ = "dense"
UpperCAmelCase__ = "beam"
UpperCAmelCase__ = 2
UpperCAmelCase__ = 64
UpperCAmelCase__ = 256
UpperCAmelCase__ = None
UpperCAmelCase__ = None
UpperCAmelCase__ = st.sidebar.checkbox("Generation options")
if generate_options:
UpperCAmelCase__ = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
st.sidebar.markdown(generate_info)
UpperCAmelCase__ = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
UpperCAmelCase__ = st.sidebar.slider(
"Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
UpperCAmelCase__ = st.sidebar.slider(
"Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
UpperCAmelCase__ = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
UpperCAmelCase__ = st.sidebar.slider(
"Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
UpperCAmelCase__ = st.sidebar.slider(
"Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
UpperCAmelCase__ = None
# start main text
questions_list = [
"<MY QUESTION>",
"How do people make chocolate?",
"Why do we get a fever when we are sick?",
"How can different animals perceive different colors?",
"What is natural language processing?",
"What's the best way to treat a sunburn?",
"What exactly are vitamins ?",
"How does nuclear energy provide electricity?",
"What's the difference between viruses and bacteria?",
"Why are flutes classified as woodwinds when most of them are made out of metal ?",
"Why do people like drinking coffee even though it tastes so bad?",
"What happens when wine ages? How does it make the wine taste better?",
"If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
"How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
"How does New Zealand have so many large bird predators?",
]
question_s = st.selectbox(
    "What would you like to ask? ---- select <MY QUESTION> to enter a new query",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s
if st.button("Show me!"):
if action in [0, 1, 3]:
if index_type == "mixed":
UpperCAmelCase__ , UpperCAmelCase__ = make_support(question, source=wiki_source, method="dense", n_results=10)
UpperCAmelCase__ , UpperCAmelCase__ = make_support(question, source=wiki_source, method="sparse", n_results=10)
UpperCAmelCase__ = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
UpperCAmelCase__ = support_list[:10]
UpperCAmelCase__ = "<P> " + " <P> ".join([res[-1] for res in support_list])
else:
UpperCAmelCase__ , UpperCAmelCase__ = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
        answer, support_list = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == "sampled"),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("### The model generated answer is:")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
for i, res in enumerate(support_list):
UpperCAmelCase__ = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
UpperCAmelCase__ = res[1].strip()
if sec_titles == "":
UpperCAmelCase__ = "[{}]({})".format(res[0], wiki_url)
else:
UpperCAmelCase__ = sec_titles.split(" & ")
UpperCAmelCase__ = " & ".join(
["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
)
st.markdown(
"{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True
)
if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
        st.markdown(
            "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
        )
        answers_st = [
"{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
if i == 0 or sc > 2
]
st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
UpperCAmelCase__ = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 26 | 1 |
"""simple docstring"""
def perfect(number: int) -> bool:
    """Return True if `number` equals the sum of its proper divisors."""
    return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
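def _check_perfect() -> None:
    # Worked examples (my addition): 28 = 1 + 2 + 4 + 7 + 14 is perfect, 27 is not.
    assert perfect(28) is True
    assert perfect(27) is False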
if __name__ == "__main__":
print("""Program to check whether a number is a Perfect number or not...""")
    number = int(input("""Enter number: """).strip())
    print(f'{number} is {"" if perfect(number) else "not "}a Perfect Number.') | 86 |
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class DonutProcessor(ProcessorMixin):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'AutoImageProcessor'
    tokenizer_class = 'AutoTokenizer'

    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , FutureWarning , )
            feature_extractor = kwargs.pop('feature_extractor' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def __call__( self , *args , **kwargs ):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )
        images = kwargs.pop('images' , None )
        text = kwargs.pop('text' , None )
        if len(args ) > 0:
            images = args[0]
            args = args[1:]
        if images is None and text is None:
            raise ValueError('You need to specify either an `images` or `text` input to process.' )
        if images is not None:
            inputs = self.image_processor(images , *args , **kwargs )
        if text is not None:
            encodings = self.tokenizer(text , **kwargs )
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs['labels'] = encodings['input_ids']
            return inputs
    def batch_decode( self , *args , **kwargs ):
        """Forward all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        """Forward all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args , **kwargs )
    @contextmanager
    def as_target_processor( self ):
        warnings.warn(
            '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
            'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
            'your images inputs, or in a separate call.' )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def token2json( self , tokens , is_inner_value=False , added_vocab=None ):
        """Convert a generated token sequence into an ordered JSON structure."""
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()
        output = {}
        while tokens:
            start_token = re.search(R'<s_(.*?)>' , tokens , re.IGNORECASE )
            if start_token is None:
                break
            key = start_token.group(1 )
            end_token = re.search(Rf"</s_{key}>" , tokens , re.IGNORECASE )
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token , '' )
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token )
                end_token_escaped = re.escape(end_token )
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}" , tokens , re.IGNORECASE )
                if content is not None:
                    content = content.group(1 ).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.token2json(content , is_inner_value=True , added_vocab=added_vocab )
                        if value:
                            if len(value ) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(R'<sep/>' ):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf )
                        if len(output[key] ) == 1:
                            output[key] = output[key][0]
                tokens = tokens[tokens.find(end_token ) + len(end_token ) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.token2json(tokens[6:] , is_inner_value=True , added_vocab=added_vocab )
        if len(output ):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}
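    def _demo_token2json( self ):
        # Worked example (my addition, not part of the original processor): nested
        # tags parse into nested JSON,
        #   self.token2json("<s_menu><s_name>Latte</s_name></s_menu>")
        #   -> {"menu": {"name": "Latte"}}
        return self.token2json('<s_menu><s_name>Latte</s_name></s_menu>' )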
    @property
    def feature_extractor_class( self ):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , FutureWarning , )
        return self.image_processor_class

    @property
    def feature_extractor( self ):
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , FutureWarning , )
        return self.image_processor | 86 | 1 |
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
logger = logging.get_logger(__name__)
def load_flax_checkpoint_in_pytorch_model(pt_model, model_file):
    """Load Flax checkpoint weights (a .msgpack file) into a PyTorch model."""
    try:
        with open(model_file , 'rb' ) as flax_state_f:
            flax_state = from_bytes(None , flax_state_f.read() )
    except UnpicklingError as e:
        try:
            with open(model_file ) as f:
                if f.read().startswith('version' ):
                    raise OSError(
                        'You seem to have cloned a repository without having git-lfs installed. Please'
                        ' install git-lfs and run `git lfs install` followed by `git lfs pull` in the'
                        ' folder you cloned.' )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f'Unable to convert {model_file} to Flax deserializable object. ' )
    return load_flax_weights_in_pytorch_model(pt_model , flax_state )
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16, since torch.from_numpy cannot handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    pt_model.base_model_prefix = ""

    flax_state_dict = flatten_dict(flax_state, sep=".")
    pt_model_dict = pt_model.state_dict()

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())
    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(".")

        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]

        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("_0", ".0")
                    .replace("_1", ".1")
                    .replace("_2", ".2")
                    .replace("_3", ".3")
                    .replace("_4", ".4")
                    .replace("_5", ".5")
                    .replace("_6", ".6")
                    .replace("_7", ".7")
                    .replace("_8", ".8")
                    .replace("_9", ".9")
                )

        flax_key = ".".join(flax_key_tuple_array)

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)
    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )

    return pt_model
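# Hedged usage sketch (ours): given an already-instantiated PyTorch model and a
# serialized Flax checkpoint file, the loaders above are typically driven as
#
#     pt_model = load_flax_checkpoint_in_pytorch_model(pt_model, "flax_model.msgpack")
#
# where "flax_model.msgpack" is a placeholder path, not taken from this file.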
return pt_model
| 363 |
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")

    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            prefix = f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            prefix = f"text_model.encoder.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val

    return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False):
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to dump the processor and PyTorch model.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to GroupViT checkpoint''')
parser.add_argument(
'''--model_name''',
        default="groupvit-gcc-yfcc",
type=str,
help='''Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.''',
)
    args = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
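# Example invocation (hedged; the checkpoint path is a placeholder, not taken
# from this file):
#   python <this script> --checkpoint_path /path/to/groupvit_checkpoint.pth \
#       --model_name groupvit-gcc-yfcc --pytorch_dump_folder_path ./groupvit-gcc-yfcc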
| 312 | 0 |
'''simple docstring'''
import random


def partition(a, left_index, right_index):
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a, left, right):
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(a, left, pivot_index)  # recursive quicksort to the left of the pivot point
        quick_sort_random(a, pivot_index + 1, right)  # recursive quicksort to the right of the pivot point


def main():
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)
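# Minimal self-check (ours, not in the original module): in-place sorting must
# match Python's built-in sorted(), whatever pivots are drawn.
_demo_data = [7, 3, 9, 1, 4, 4, 0]
quick_sort_random(_demo_data, 0, len(_demo_data))
assert _demo_data == sorted([7, 3, 9, 1, 4, 4, 0])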
if __name__ == "__main__":
    main()
| 97 |
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    """Interpolate and evaluate a polynomial at x0 using Neville's method."""
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]
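# Sanity check (ours, not in the original module): the points below lie on the
# line y = x + 5, and Neville's scheme is exact for polynomials, so evaluating
# at x0 = 5 must give 10.0.
assert abs(neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)[0] - 10.0) < 1e-9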
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 6 | 0 |
"""simple docstring"""
import math
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the squares of the first n natural numbers."""
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
return square_of_sum - sum_of_squares
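# Worked example (ours): for n = 10 the sum of the squares is 385 and the
# square of the sum is 55**2 = 3025, so the difference is 3025 - 385 = 2640.
assert solution(10) == 2640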
if __name__ == "__main__":
print(F"""{solution() = }""")
| 351 |
"""simple docstring"""
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
"""text_branch""": """text_model""",
"""audio_branch""": """audio_model.audio_encoder""",
"""attn""": """attention.self""",
"""self.proj""": """output.dense""",
"""attention.self_mask""": """attn_mask""",
"""mlp.fc1""": """intermediate.dense""",
"""mlp.fc2""": """output.dense""",
"""norm1""": """layernorm_before""",
"""norm2""": """layernorm_after""",
"""bn0""": """batch_norm""",
}
processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg
def rename_state_dict(state_dict):
    model_state_dict = {}

    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"

    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projection_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projection_layer == 0 else 2
            key = key.replace(f"_projection.{projection_layer}.", f"_projection.linear{transformers_projection_layer}.")

        if "audio" in key and "qkv" in key:
            # split qkv into query, key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3

            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]

            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value

    return model_state_dict
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
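# Note on the sequential renaming above (ours): mapping nn.Sequential index i
# to layer i // 3 assumes the original projection MLP stores a Linear in every
# third slot (e.g. Linear/activation/dropout groups). Quick arithmetic check:
assert [i // 3 for i in (0, 3, 6, 9)] == [0, 1, 2, 3]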
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument("""--enable_fusion""", action="""store_true""", help="""Whether to enable fusion or not""")
    args = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 234 | 0 |
'''simple docstring'''
def temp_input_value(min_val: int = 10, max_val: int = 1000, option: bool = True) -> int:
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"

    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val


def get_avg(number_1: int, number_2: int) -> int:
    return int((number_1 + number_2) / 2)
def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'

    if lower > higher:
        raise ValueError("argument value for lower and higher must be (lower < higher)")

    if not lower < to_guess < higher:
        raise ValueError(
            "guess value must be within the range of lower and higher value"
        )

    def answer(number: int) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")

    last_lowest = lower
    last_highest = higher

    last_numbers = []

    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)

        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break

    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")


def main() -> None:
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)
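# Non-interactive check (ours, not in the original module): the probed interval
# shrinks every turn, so the search always terminates; calling the helper below
# prints "guess the number : 17".
def _demo_guess_the_number() -> None:
    guess_the_number(10, 1000, 17)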
if __name__ == "__main__":
main()
| 58 |
import argparse
import json
from tqdm import tqdm
def main():
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--src_path",
        type=str,
        default="biencoder-nq-dev.json",
        help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set",
        type=str,
        help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path",
        type=str,
        help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")
if __name__ == "__main__":
main()
| 43 | 0 |
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
},
"merges_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}
class HerbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep

        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 359 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=512,
        encoder_layers=8,
        encoder_ffn_dim=2048,
        encoder_attention_heads=16,
        decoder_layers=8,
        decoder_ffn_dim=2048,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )

        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
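    # Hedged usage sketch (ours; helper names may differ across transformers
    # versions):
    #   config = BlenderbotSmallConfig()
    #   onnx_config = BlenderbotSmallOnnxConfig(config, task="default")
    #   tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
    #   dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)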
| 138 | 0 |
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
c = 299792458

# Symbols
ct, x, y, z = symbols("ct x y z")
def beta(velocity: float) -> float:
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")

    return velocity / c


def gamma(velocity: float) -> float:
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event: np.ndarray = None) -> np.ndarray:
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)

    return transformation_matrix(velocity) @ event
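# Numeric spot check (ours, not in the original module): at v = 0.5c the
# Lorentz factor is 1 / sqrt(1 - 0.25) = 2 / sqrt(3).
assert abs(gamma(0.5 * c) - 2 / sqrt(3)) < 1e-12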
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
    four_vector = transform(29979245)
print('''Example of four vector: ''')
print(F"""ct' = {four_vector[0]}""")
print(F"""x' = {four_vector[1]}""")
print(F"""y' = {four_vector[2]}""")
print(F"""z' = {four_vector[3]}""")
# Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
print(F"""\n{numerical_vector}""")
| 280 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_falcon''': ['''FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FalconConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_falcon"] = [
'''FALCON_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FalconForCausalLM''',
'''FalconModel''',
'''FalconPreTrainedModel''',
'''FalconForSequenceClassification''',
'''FalconForTokenClassification''',
'''FalconForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
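    # How the lazy pattern works (illustrative note, ours): _LazyModule replaces
    # this package's module object in sys.modules, so attribute access triggers
    # the real submodule import on first use, roughly like a PEP 562 module
    # __getattr__:
    #
    #     def __getattr__(name):
    #         submodule = _name_to_submodule[name]  # hypothetical mapping built from _import_structure
    #         return getattr(importlib.import_module(f".{submodule}", __name__), name)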
| 280 | 1 |
"""simple docstring"""
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup
headers = {
"""User-Agent""": """Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"""
""" (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"""
}
def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) -> int:
    """Search Google Images for the query and save up to max_images results to disk."""
    max_images = min(max_images, 50)  # Prevent abuse!
    params = {
        "q": query,
        "tbm": "isch",
        "hl": "en",
        "ijn": "0",
    }

    html = requests.get("https://www.google.com/search", params=params, headers=headers)
    soup = BeautifulSoup(html.text, "html.parser")
    matched_images_data = "".join(
        re.findall(r"AF_initDataCallback\(([^<]+)\);", str(soup.select("script")))
    )

    matched_images_data_fix = json.dumps(matched_images_data)
    matched_images_data_json = json.loads(matched_images_data_fix)

    matched_google_image_data = re.findall(
        r"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",",
        matched_images_data_json,
    )
    if not matched_google_image_data:
        return 0

    removed_matched_google_images_thumbnails = re.sub(
        r"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]",
        "",
        str(matched_google_image_data),
    )

    matched_google_full_resolution_images = re.findall(
        r"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]",
        removed_matched_google_images_thumbnails,
    )
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images):
        if index >= max_images:
            return index
        original_size_img_not_fixed = bytes(fixed_full_res_image, "ascii").decode(
            "unicode-escape"
        )
        original_size_img = bytes(original_size_img_not_fixed, "ascii").decode(
            "unicode-escape"
        )
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                "User-Agent",
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
            )
        ]
        urllib.request.install_opener(opener)
        path_name = f"query_{query.replace(' ', '_')}"
        if not os.path.exists(path_name):
            os.makedirs(path_name)
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img, f"{path_name}/original_size_img_{index}.jpg"
        )
    return index
if __name__ == "__main__":
try:
        image_count = download_images_from_google_query(sys.argv[1])
print(F"""{image_count} images were downloaded to disk.""")
except IndexError:
print("""Please provide a search term.""")
raise
| 364 |
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 100 | 0 |
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )
        examples = [
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'candidate_labels': ['cat', 'remote', 'couch'],
}
]
return object_detector, examples
    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector(examples[0], threshold=0.0)

        n = len(outputs)
        self.assertGreater(n, 0)
        self.assertEqual(
            outputs,
            [
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                }
                for i in range(n)
            ],
        )
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF' )
    def test_small_model_tf(self):
pass
@require_torch
    def test_small_model_pt(self):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        outputs = object_detector(
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            candidate_labels=["cat", "remote", "couch"],
            threshold=0.64,
        )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'score': 0.7_2_3_5, 'label': 'cat', 'box': {'xmin': 2_0_4, 'ymin': 1_6_7, 'xmax': 2_3_2, 'ymax': 1_9_0}},
{'score': 0.7_2_1_8, 'label': 'remote', 'box': {'xmin': 2_0_4, 'ymin': 1_6_7, 'xmax': 2_3_2, 'ymax': 1_9_0}},
{'score': 0.7_1_8_4, 'label': 'couch', 'box': {'xmin': 2_0_4, 'ymin': 1_6_7, 'xmax': 2_3_2, 'ymax': 1_9_0}},
{'score': 0.6_7_4_8, 'label': 'remote', 'box': {'xmin': 5_7_1, 'ymin': 8_3, 'xmax': 5_9_8, 'ymax': 1_0_3}},
{'score': 0.6_6_5_6, 'label': 'cat', 'box': {'xmin': 5_7_1, 'ymin': 8_3, 'xmax': 5_9_8, 'ymax': 1_0_3}},
{'score': 0.6_6_1_4, 'label': 'couch', 'box': {'xmin': 5_7_1, 'ymin': 8_3, 'xmax': 5_9_8, 'ymax': 1_0_3}},
{'score': 0.6_4_5_6, 'label': 'remote', 'box': {'xmin': 4_9_4, 'ymin': 1_0_5, 'xmax': 5_2_1, 'ymax': 1_2_7}},
{'score': 0.6_4_2, 'label': 'remote', 'box': {'xmin': 6_7, 'ymin': 2_7_4, 'xmax': 9_3, 'ymax': 2_9_7}},
{'score': 0.6_4_1_9, 'label': 'cat', 'box': {'xmin': 4_9_4, 'ymin': 1_0_5, 'xmax': 5_2_1, 'ymax': 1_2_7}},
], )
        outputs = object_detector(
[
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'candidate_labels': ['cat', 'remote', 'couch'],
}
], threshold=0.6_4, )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{'score': 0.7_2_3_5, 'label': 'cat', 'box': {'xmin': 2_0_4, 'ymin': 1_6_7, 'xmax': 2_3_2, 'ymax': 1_9_0}},
{'score': 0.7_2_1_8, 'label': 'remote', 'box': {'xmin': 2_0_4, 'ymin': 1_6_7, 'xmax': 2_3_2, 'ymax': 1_9_0}},
{'score': 0.7_1_8_4, 'label': 'couch', 'box': {'xmin': 2_0_4, 'ymin': 1_6_7, 'xmax': 2_3_2, 'ymax': 1_9_0}},
{'score': 0.6_7_4_8, 'label': 'remote', 'box': {'xmin': 5_7_1, 'ymin': 8_3, 'xmax': 5_9_8, 'ymax': 1_0_3}},
{'score': 0.6_6_5_6, 'label': 'cat', 'box': {'xmin': 5_7_1, 'ymin': 8_3, 'xmax': 5_9_8, 'ymax': 1_0_3}},
{'score': 0.6_6_1_4, 'label': 'couch', 'box': {'xmin': 5_7_1, 'ymin': 8_3, 'xmax': 5_9_8, 'ymax': 1_0_3}},
{'score': 0.6_4_5_6, 'label': 'remote', 'box': {'xmin': 4_9_4, 'ymin': 1_0_5, 'xmax': 5_2_1, 'ymax': 1_2_7}},
{'score': 0.6_4_2, 'label': 'remote', 'box': {'xmin': 6_7, 'ymin': 2_7_4, 'xmax': 9_3, 'ymax': 2_9_7}},
{'score': 0.6_4_1_9, 'label': 'cat', 'box': {'xmin': 4_9_4, 'ymin': 1_0_5, 'xmax': 5_2_1, 'ymax': 1_2_7}},
]
], )
@require_torch
@slow
    def test_large_model_pt(self):
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
        )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'score': 0.2_8_6_8, 'label': 'cat', 'box': {'xmin': 3_2_4, 'ymin': 2_0, 'xmax': 6_4_0, 'ymax': 3_7_3}},
{'score': 0.2_7_7, 'label': 'remote', 'box': {'xmin': 4_0, 'ymin': 7_2, 'xmax': 1_7_7, 'ymax': 1_1_5}},
{'score': 0.2_5_3_7, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 5_5, 'xmax': 3_1_5, 'ymax': 4_7_2}},
{'score': 0.1_4_7_4, 'label': 'remote', 'box': {'xmin': 3_3_5, 'ymin': 7_4, 'xmax': 3_7_1, 'ymax': 1_8_7}},
{'score': 0.1_2_0_8, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_4_2, 'ymax': 4_7_6}},
], )
        outputs = object_detector(
[
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
], )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{'score': 0.2_8_6_8, 'label': 'cat', 'box': {'xmin': 3_2_4, 'ymin': 2_0, 'xmax': 6_4_0, 'ymax': 3_7_3}},
{'score': 0.2_7_7, 'label': 'remote', 'box': {'xmin': 4_0, 'ymin': 7_2, 'xmax': 1_7_7, 'ymax': 1_1_5}},
{'score': 0.2_5_3_7, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 5_5, 'xmax': 3_1_5, 'ymax': 4_7_2}},
{'score': 0.1_4_7_4, 'label': 'remote', 'box': {'xmin': 3_3_5, 'ymin': 7_4, 'xmax': 3_7_1, 'ymax': 1_8_7}},
{'score': 0.1_2_0_8, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_4_2, 'ymax': 4_7_6}},
],
[
{'score': 0.2_8_6_8, 'label': 'cat', 'box': {'xmin': 3_2_4, 'ymin': 2_0, 'xmax': 6_4_0, 'ymax': 3_7_3}},
{'score': 0.2_7_7, 'label': 'remote', 'box': {'xmin': 4_0, 'ymin': 7_2, 'xmax': 1_7_7, 'ymax': 1_1_5}},
{'score': 0.2_5_3_7, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 5_5, 'xmax': 3_1_5, 'ymax': 4_7_2}},
{'score': 0.1_4_7_4, 'label': 'remote', 'box': {'xmin': 3_3_5, 'ymin': 7_4, 'xmax': 3_7_1, 'ymax': 1_8_7}},
{'score': 0.1_2_0_8, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_4_2, 'ymax': 4_7_6}},
],
], )
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF' )
    def test_large_model_tf(self):
pass
@require_torch
@slow
    def test_threshold(self):
        threshold = 0.2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            threshold=threshold,
        )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'score': 0.2_8_6_8, 'label': 'cat', 'box': {'xmin': 3_2_4, 'ymin': 2_0, 'xmax': 6_4_0, 'ymax': 3_7_3}},
{'score': 0.2_7_7, 'label': 'remote', 'box': {'xmin': 4_0, 'ymin': 7_2, 'xmax': 1_7_7, 'ymax': 1_1_5}},
{'score': 0.2_5_3_7, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 5_5, 'xmax': 3_1_5, 'ymax': 4_7_2}},
], )
@require_torch
@slow
    def test_top_k(self):
        top_k = 2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            top_k=top_k,
        )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'score': 0.2_8_6_8, 'label': 'cat', 'box': {'xmin': 3_2_4, 'ymin': 2_0, 'xmax': 6_4_0, 'ymax': 3_7_3}},
{'score': 0.2_7_7, 'label': 'remote', 'box': {'xmin': 4_0, 'ymin': 7_2, 'xmax': 1_7_7, 'ymax': 1_1_5}},
], )
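    # Hedged usage note (ours): outside the test suite the same pipeline is
    # typically driven as
    #   detector = pipeline("zero-shot-object-detection")
    #   detector(image_or_url, candidate_labels=["cat", "remote"], threshold=0.1)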
| 277 |
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 277 | 1 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight"))
rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias"))
rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight"))
rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias"))
# stages
for i in range(len(config.backbone_config.depths)):
for j in range(config.backbone_config.depths[i]):
rename_keys.append((F"""backbone.stages.{i}.{j}.gamma""", F"""backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter"""))
rename_keys.append((F"""backbone.stages.{i}.{j}.depthwise_conv.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.dwconv.weight"""))
rename_keys.append((F"""backbone.stages.{i}.{j}.depthwise_conv.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.dwconv.bias"""))
rename_keys.append((F"""backbone.stages.{i}.{j}.norm.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.layernorm.weight"""))
rename_keys.append((F"""backbone.stages.{i}.{j}.norm.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.layernorm.bias"""))
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv1.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight"""))
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv1.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias"""))
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv2.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight"""))
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv2.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias"""))
if i > 0:
rename_keys.append((F"""backbone.downsample_layers.{i}.0.weight""", F"""backbone.encoder.stages.{i}.downsampling_layer.0.weight"""))
rename_keys.append((F"""backbone.downsample_layers.{i}.0.bias""", F"""backbone.encoder.stages.{i}.downsampling_layer.0.bias"""))
rename_keys.append((F"""backbone.downsample_layers.{i}.1.weight""", F"""backbone.encoder.stages.{i}.downsampling_layer.1.weight"""))
rename_keys.append((F"""backbone.downsample_layers.{i}.1.bias""", F"""backbone.encoder.stages.{i}.downsampling_layer.1.bias"""))
rename_keys.append((F"""backbone.norm{i}.weight""", F"""backbone.hidden_states_norms.stage{i+1}.weight"""))
rename_keys.append((F"""backbone.norm{i}.bias""", F"""backbone.hidden_states_norms.stage{i+1}.bias"""))
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
])
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
"upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
"upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
"upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
"upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
"upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
}
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
if model_name == "upernet-convnext-tiny":
__lowercase = torch.tensor(
[[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]])
elif model_name == "upernet-convnext-small":
__lowercase = torch.tensor(
[[-8.8_236, -8.8_236, -8.6_771], [-8.8_236, -8.8_236, -8.6_771], [-8.7_638, -8.7_638, -8.6_240]])
elif model_name == "upernet-convnext-base":
__lowercase = torch.tensor(
[[-8.8_558, -8.8_558, -8.6_905], [-8.8_558, -8.8_558, -8.6_905], [-8.7_669, -8.7_669, -8.6_021]])
elif model_name == "upernet-convnext-large":
__lowercase = torch.tensor(
[[-8.6_660, -8.6_660, -8.6_210], [-8.6_660, -8.6_660, -8.6_210], [-8.6_310, -8.6_310, -8.5_964]])
elif model_name == "upernet-convnext-xlarge":
__lowercase = torch.tensor(
[[-8.4_980, -8.4_980, -8.3_977], [-8.4_980, -8.4_980, -8.3_977], [-8.4_379, -8.4_379, -8.3_412]])
print("Logits:", outputs.logits[0, 0, :3, :3])
assert torch.allclose(outputs.logits[0, 0, :3, :3], UpperCamelCase_, atol=1E-4)
print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-convnext-tiny',
type=str,
choices=[F"upernet-convnext-{size}" for size in ['tiny', 'small', 'base', 'large', 'xlarge']],
help='Name of the ConvNext UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
_a = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
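# --- Editor's usage sketch (the file name is a placeholder; the flags come from the
# argparse block above):
#
#   python convert_upernet_checkpoint.py \
#       --model_name upernet-convnext-tiny \
#       --pytorch_dump_folder_path ./upernet-convnext-tiny \
#       --push_to_hub
#
# The script downloads the mmsegmentation checkpoint, remaps its keys, verifies a 3x3
# slice of the output logits against the hard-coded reference values, and only then
# saves and/or pushes the converted model and processor.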
| 355 |
"""simple docstring"""
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))
@get_duration
def read(dataset: datasets.Dataset, length: int):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset: datasets.Dataset, length: int, batch_size: int):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset: datasets.Dataset, length: int, type: str):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length: int, batch_size: int, type: str):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]
def benchmark_iterating():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
(read, {"length": SMALL_TEST}),
(read, {"length": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
(read_formatted, {"type": "numpy", "length": SMALL_TEST}),
(read_formatted, {"type": "pandas", "length": SMALL_TEST}),
(read_formatted, {"type": "torch", "length": SMALL_TEST}),
(read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
]
    functions_shuffled = [
(read, {"length": SMALL_TEST}),
(read, {"length": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
(read_formatted, {"type": "numpy", "length": SMALL_TEST}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)
        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling)")
        for func, kwargs in functions_shuffled:
            print("shuffled", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )
    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
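# --- Editor's note: `get_duration` comes from the benchmark suite's local `utils` module,
# which is not shown here. A minimal sketch of such a timing decorator (an assumption,
# not the actual implementation) might be:
#
#   import functools
#   import timeit
#
#   def get_duration(func):
#       @functools.wraps(func)
#       def wrapper(*args, **kwargs):
#           start = timeit.default_timer()
#           func(*args, **kwargs)
#           return timeit.default_timer() - start
#       return wrapper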
| 144 | 0 |
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
_UpperCAmelCase = version.parse(importlib_metadata.version("""nltk"""))
if NLTK_VERSION >= version.Version("""3.6.4"""):
from nltk import word_tokenize
_UpperCAmelCase = """\
@inproceedings{banarjee2005,
title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},
author = {Banerjee, Satanjeev and Lavie, Alon},
booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},
month = jun,
year = {2005},
address = {Ann Arbor, Michigan},
publisher = {Association for Computational Linguistics},
url = {https://www.aclweb.org/anthology/W05-0909},
pages = {65--72},
}
"""
_UpperCAmelCase = """\
METEOR, an automatic metric for machine translation evaluation
that is based on a generalized concept of unigram matching between the
machine-produced translation and human-produced reference translations.
Unigrams can be matched based on their surface forms, stemmed forms,
and meanings; furthermore, METEOR can be easily extended to include more
advanced matching strategies. Once all generalized unigram matches
between the two strings have been found, METEOR computes a score for
this matching using a combination of unigram-precision, unigram-recall, and
a measure of fragmentation that is designed to directly capture how
well-ordered the matched words in the machine translation are in relation
to the reference.
METEOR gets an R correlation value of 0.347 with human evaluation on the Arabic
data and 0.331 on the Chinese data. This is shown to be an improvement on
using simply unigram-precision, unigram-recall and their harmonic F1
combination.
"""
_UpperCAmelCase = """
Computes METEOR score of translated segments against one or more references.
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
alpha: Parameter for controlling relative weights of precision and recall. default: 0.9
beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3
gamma: Relative weight assigned to fragmentation penalty. default: 0.5
Returns:
'meteor': meteor score.
Examples:
>>> meteor = datasets.load_metric('meteor')
>>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]
>>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]
>>> results = meteor.compute(predictions=predictions, references=references)
>>> print(round(results[\"meteor\"], 4))
0.6944
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Meteor(datasets.Metric):
'''simple docstring'''
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'] , reference_urls=[
'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score',
'https://en.wikipedia.org/wiki/METEOR',
] , )
    def _download_and_prepare(self, dl_manager):
"""simple docstring"""
import nltk
nltk.download('wordnet' )
if NLTK_VERSION >= version.Version('3.6.5' ):
nltk.download('punkt' )
if NLTK_VERSION >= version.Version('3.6.6' ):
nltk.download('omw-1.4' )
    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        if NLTK_VERSION >= version.Version('3.6.5'):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]

        return {"meteor": np.mean(scores)}
| 140 | from collections import deque
from .hash_table import HashTable
class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        # each slot holds a deque so colliding keys chain their values
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
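# --- Editor's usage sketch (an assumption: `HashTable` in the sibling `hash_table` module
# follows the TheAlgorithms interface, with a size_table/charge_factor constructor and an
# `insert_data` method):
#
#   ht = HashTableWithLinkedList(size_table=5, charge_factor=2)
#   for value in (10, 20, 30):
#       ht.insert_data(value)
#   print(ht.balanced_factor())  # average remaining capacity per slot, scaled by charge_factor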
| 140 | 1 |
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class FeatureExtractionSavingTestMixin:
    feature_extraction_class = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, 'feat_extract.json')
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
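# --- Editor's sketch: how a concrete test case would plug into the mixin above. The
# feature-extractor class and its kwargs are illustrative assumptions.
#
#   import unittest
#   from transformers import Wav2Vec2FeatureExtractor
#
#   class Wav2Vec2FeatureExtractionSaveTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
#       feature_extraction_class = Wav2Vec2FeatureExtractor
#
#       @property
#       def feat_extract_dict(self):
#           return {"feature_size": 1, "sampling_rate": 16000, "padding_value": 0.0}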
| 362 |
from __future__ import annotations
END = "#"


class Trie:
    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix: str):
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    print(autocomplete_using_trie("de"))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
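# --- Editor's example run (dict insertion order determines the output order; `_elements`
# marks each complete word with a trailing space):
#
#   >>> autocomplete_using_trie("de")
#   ('depart ', 'detergent ', 'deer ', 'deal ')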
| 260 | 0 |
'''simple docstring'''
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
UpperCAmelCase_ = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wavaveca(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group"
            )
            is_used = True
        elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."]):
            load_adapter(name, value, adapter, unused_weights)
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def load_adapter(full_name, value, adapter, unused_weights):
    name = full_name.split("adaptor.")[-1]
    items = name.split(".")

    if items[1].isdigit():
        layer_id = int(items[1])
    else:
        layer_id = None

    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."
                adapter.proj_layer_norm.bias.data = value
                logger.info(f"Adapter proj layer norm bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."
                adapter.proj_layer_norm.weight.data = value
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."
                adapter.proj.bias.data = value
                logger.info(f"Adapter proj layer bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."
                adapter.proj.weight.data = value
                logger.info(f"Adapter proj layer weight was initialized from {full_name}.")
    elif isinstance(layer_id, int):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(f"Adapter layer {layer_id} bias was initialized from {full_name}.")
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."
            adapter.layers[layer_id].conv.weight.data = value
            logger.info(f"Adapter layer {layer_id} weight was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    config_yaml_path,
    encoder_config_path,
    decoder_config_path,
    add_adapter,
    adapter_kernel_size,
    adapter_stride,
    decoder_start_token_id,
    encoder_output_dim,
):
    # load configs
    encoder_config = WavaVecaConfig.from_pretrained(
        encoder_config_path,
        add_adapter=add_adapter,
        adapter_stride=adapter_stride,
        adapter_kernel_size=adapter_kernel_size,
        use_auth_token=True,
        output_hidden_size=encoder_output_dim,
    )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path)

    # load model
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path],
        arg_overrides={
            "config_yaml": config_yaml_path,
            "data": "/".join(dict_path.split("/")[:-1]),
            "w2v_path": checkpoint_path,
            "load_pretrained_decoder_from": None,
        },
    )
    model = model[0].eval()

    # load feature extractor
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(encoder_config_path, use_auth_token=True)

    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)
    recursively_load_weights_wavaveca(model.encoder, hf_encoder)

    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    tokenizer = MBartaaTokenizer(dict_path)
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "mbart50"
    config["feature_extractor_type"] = "wav2vec2"
    config["decoder_start_token_id"] = tokenizer.eos_token_id
    config["forced_bos_token_id"] = 250004
    config["forced_eos_token_id"] = tokenizer.eos_token_id

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_yaml_path', default=None, type=str, help='Path to yaml file of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-xls-r-1b',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/mbart-large-50-one-to-many-mmt',
type=str,
help='Path to hf decoder checkpoint config',
)
parser.add_argument('--add_adapter', default=True, type=bool, help='whethere to add model adapter layers')
parser.add_argument('--adapter_stride', default=2, type=int, help='stride of adapter layers')
parser.add_argument('--adapter_kernel_size', default=3, type=int, help='kernel size of adapter layers')
parser.add_argument('--encoder_output_dim', default=1_0_2_4, type=int, help='encoder output dim')
parser.add_argument('--start_token_id', default=2_5_0_0_0_4, type=int, help='`decoder_start_token_id` of model config')
UpperCAmelCase_ = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
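# --- Editor's usage sketch (paths are hypothetical placeholders; the flags come from the
# argparse block above):
#
#   python convert_wav2vec2_mbart_checkpoint.py \
#       --checkpoint_path ./xls_r_mbart/checkpoint_best.pt \
#       --dict_path ./xls_r_mbart/dict.mbart50.txt \
#       --config_yaml_path ./xls_r_mbart/config.yaml \
#       --pytorch_dump_folder_path ./wav2vec2-xls-r-mbart50
#
# The output folder holds the SpeechEncoderDecoderModel weights, the MBart-50 tokenizer
# files, and the wav2vec2 feature-extractor config.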
| 346 |
'''simple docstring'''
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
UpperCAmelCase_ = '\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n'
UpperCAmelCase_ = '\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper "Evaluating Large Language Models Trained on Code"\n(https://arxiv.org/abs/2107.03374).\n'
UpperCAmelCase_ = '\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric("code_eval")\n >>> test_cases = ["assert add(2,3)==5"]\n >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {\'pass@1\': 0.5, \'pass@2\': 1.0}\n'
UpperCAmelCase_ = '\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe "code_eval" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper "Evaluating Large\nLanguage Models Trained on Code" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"\n\n################################################################################\\n'
UpperCAmelCase_ = 'The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
'''simple docstring'''
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Value("""string""" ),
} ) , homepage="""https://github.com/openai/human-eval""" , codebase_urls=["""https://github.com/openai/human-eval"""] , reference_urls=["""https://github.com/openai/human-eval"""] , license=_LICENSE , )
    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Returns the scores"""

        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results
def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
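# --- Editor's worked example for the unbiased pass@k estimator above: with n = 5 samples,
# c = 2 correct, and k = 2, the estimate is 1 - (1 - 2/4) * (1 - 2/5) = 1 - 0.5 * 0.6 = 0.7.
#
#   import numpy as np
#   print(estimate_pass_at_k(np.array([5]), np.array([2]), 2))  # -> [0.7]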
| 346 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 365 |
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ : Union[str, Any] = logging.get_logger(__name__)
UpperCamelCase__ : List[str] = {
"""snap-research/efficientformer-l1-300""": (
"""https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"""
),
}
class EfficientFormerConfig(PretrainedConfig):
    model_type = "efficientformer"

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
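# --- Editor's usage sketch (EfficientFormerModel is the matching transformers model class;
# treat the exact import as an assumption):
#
#   from transformers import EfficientFormerConfig, EfficientFormerModel
#
#   config = EfficientFormerConfig()       # defaults correspond to the l1-sized backbone
#   model = EfficientFormerModel(config)   # randomly initialized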
| 330 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
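# --- Editor's note on the lazy-import pattern above: the module body only builds
# `_import_structure`; `_LazyModule` replaces the module in `sys.modules` and resolves
# names on first attribute access, so the heavy torch backend loads only when needed:
#
#   from transformers.models import squeezebert   # cheap, nothing heavy imported yet
#   cfg = squeezebert.SqueezeBertConfig()          # first access triggers the real import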
| 263 |
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()
def _match(qs, ks):
    """Return True if regexes in qs match any window of strings in tuple ks."""
    # compile regexes and force complete match
    qts = tuple(re.compile(x + "$") for x in qs)
    for i in range(len(ks) - len(qts) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace
def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
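# --- Editor's usage sketch (the model object and the `unfreeze` import are assumptions;
# `unfreeze` lives in flax.core.frozen_dict):
#
#   from flax.core.frozen_dict import unfreeze
#
#   param_specs = set_partitions(unfreeze(model.params))  # model: a Flax GPT-J-style module
#   # `param_specs` mirrors the parameter tree, with a PartitionSpec (or None) at each leaf;
#   # any leaf matching no rule trips the "Incomplete partition spec." assertion above.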
| 263 | 1 |
class Node:
    def __init__(self, data: int, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f"{self.data}"

    def get_data(self) -> int:
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous


class LinkedListIterator:
    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value


class LinkedList:
    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value: int):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node: Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value: int) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous

        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert

        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next

        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert

        node.next = node_to_insert

    def insert_at_position(self, position: int, value: int) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item: int) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value):
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()

            if node == self.tail:
                self.tail = self.tail.get_previous()

            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        if node.get_next():
            node.next.previous = node.previous

        if node.get_previous():
            node.previous.next = node.next

        node.next = None
        node.previous = None

    def is_empty(self):
        return self.head is None
def UpperCAmelCase ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
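# --- Editor's quick demo of the list API above (values arbitrary):
#
#   ll = LinkedList()
#   for value in (1, 2, 3):
#       ll.insert(value)
#   ll.insert_at_position(position=2, value=9)
#   print(ll)          # 1 9 2 3
#   ll.delete_value(9)
#   print(2 in ll)     # True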
| 366 |
"""simple docstring"""
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
_A = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
_A = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F"""{len(upper_files)} files contain uppercase characters:""")
print('\n'.join(upper_files) + '\n')
_A = [file for file in filepaths if ' ' in file]
if space_files:
print(F"""{len(space_files)} files contain space characters:""")
print('\n'.join(space_files) + '\n')
_A = [file for file in filepaths if '-' in file]
if hyphen_files:
print(F"""{len(hyphen_files)} files contain hyphen characters:""")
print('\n'.join(hyphen_files) + '\n')
_A = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F"""{len(nodir_files)} files are not in a directory:""")
print('\n'.join(nodir_files) + '\n')
_A = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 205 | 0 |
'''simple docstring'''
def sum_of_digits(n: int) -> int:
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits(n // 10)


def sum_of_digits_compact(n: int) -> int:
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 174 |
'''simple docstring'''
import operator
def strand_sort(arr, reverse=False, solution=None):
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []

    if not arr:
        return solution

    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)

    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)

    strand_sort(arr, reverse, solution)
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
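# --- Editor's trace of strand_sort on [4, 3, 5, 1, 2] (ascending): the first strand pulled
# out is [4, 5], which seeds the solution; recursion on the remainder [3, 1, 2] then merges
# the strands [3] and [1, 2] into it, yielding [1, 2, 3, 4, 5].
#
#   print(strand_sort([4, 3, 5, 1, 2]))  # [1, 2, 3, 4, 5]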
| 174 | 1 |
"""simple docstring"""
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the given shape, with values below vocab_size."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    return np.array(values, dtype=jnp.int32).reshape(shape)


def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class _lowerCAmelCase :
"""simple docstring"""
__magic_name__ :Optional[int] = None
__magic_name__ :List[str] = ()
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ :Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
lowerCAmelCase__ :Union[str, Any] = 2
lowerCAmelCase__ :List[Any] = inputs['input_ids'].shape[-1] // 2
lowerCAmelCase__ :Union[str, Any] = inputs['input_ids'][:max_batch_size, :sequence_length]
lowerCAmelCase__ :str = jnp.ones_like(__UpperCAmelCase )
lowerCAmelCase__ :int = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
lowerCAmelCase__ :List[Any] = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
lowerCAmelCase__ :Optional[Any] = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Any = self._get_input_ids_and_config()
lowerCAmelCase__ :int = False
lowerCAmelCase__ :List[Any] = max_length
lowerCAmelCase__ :List[Any] = 0
for model_class in self.all_generative_model_classes:
lowerCAmelCase__ :int = model_class(__UpperCAmelCase )
lowerCAmelCase__ :Union[str, Any] = model_class.__name__[4:] # Skip the "Flax" at the beginning
lowerCAmelCase__ :List[Any] = getattr(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ :Optional[int] = pt_model_class(__UpperCAmelCase ).eval()
lowerCAmelCase__ :Dict = load_flax_weights_in_pytorch_model(__UpperCAmelCase , flax_model.params )
lowerCAmelCase__ :Union[str, Any] = flax_model.generate(__UpperCAmelCase ).sequences
lowerCAmelCase__ :Union[str, Any] = pt_model.generate(torch.tensor(__UpperCAmelCase , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
lowerCAmelCase__ :Union[str, Any] = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :str = self._get_input_ids_and_config()
lowerCAmelCase__ :Any = False
lowerCAmelCase__ :Any = max_length
for model_class in self.all_generative_model_classes:
lowerCAmelCase__ :List[str] = model_class(__UpperCAmelCase )
lowerCAmelCase__ :Any = model.generate(__UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCAmelCase )
lowerCAmelCase__ :List[str] = jit(model.generate )
lowerCAmelCase__ :Optional[Any] = jit_generate(__UpperCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Dict = self._get_input_ids_and_config()
lowerCAmelCase__ :int = True
lowerCAmelCase__ :Optional[int] = max_length
for model_class in self.all_generative_model_classes:
lowerCAmelCase__ :Dict = model_class(__UpperCAmelCase )
lowerCAmelCase__ :int = model.generate(__UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCAmelCase )
lowerCAmelCase__ :List[Any] = jit(model.generate )
lowerCAmelCase__ :Dict = jit_generate(__UpperCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Optional[Any] = self._get_input_ids_and_config()
lowerCAmelCase__ :Optional[Any] = False
lowerCAmelCase__ :Dict = max_length
lowerCAmelCase__ :Dict = 2
for model_class in self.all_generative_model_classes:
lowerCAmelCase__ :List[str] = model_class(__UpperCAmelCase )
lowerCAmelCase__ :Dict = model.generate(__UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCAmelCase )
lowerCAmelCase__ :Any = jit(model.generate )
lowerCAmelCase__ :Dict = jit_generate(__UpperCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Any = self._get_input_ids_and_config()
lowerCAmelCase__ :int = False
lowerCAmelCase__ :Optional[Any] = max_length
lowerCAmelCase__ :Optional[Any] = 2
lowerCAmelCase__ :Optional[int] = 2
for model_class in self.all_generative_model_classes:
lowerCAmelCase__ :Union[str, Any] = model_class(__UpperCAmelCase )
lowerCAmelCase__ :str = model.generate(__UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :str = self._get_input_ids_and_config()
lowerCAmelCase__ :Optional[int] = True
lowerCAmelCase__ :Tuple = max_length
lowerCAmelCase__ :Optional[int] = 0.8
lowerCAmelCase__ :Any = 1_0
lowerCAmelCase__ :List[Any] = 0.3
lowerCAmelCase__ :Tuple = 1
lowerCAmelCase__ :Union[str, Any] = 8
lowerCAmelCase__ :Optional[Any] = 9
for model_class in self.all_generative_model_classes:
lowerCAmelCase__ :List[Any] = model_class(__UpperCAmelCase )
lowerCAmelCase__ :Union[str, Any] = model.generate(__UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCAmelCase )
lowerCAmelCase__ :Optional[int] = jit(model.generate )
lowerCAmelCase__ :Any = jit_generate(__UpperCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Optional[Any] = self._get_input_ids_and_config()
lowerCAmelCase__ :Union[str, Any] = max_length
lowerCAmelCase__ :str = 1
lowerCAmelCase__ :Tuple = 8
lowerCAmelCase__ :Optional[Any] = 9
for model_class in self.all_generative_model_classes:
lowerCAmelCase__ :str = model_class(__UpperCAmelCase )
lowerCAmelCase__ :Union[str, Any] = model.generate(__UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCAmelCase )
lowerCAmelCase__ :List[str] = jit(model.generate )
lowerCAmelCase__ :Optional[int] = jit_generate(__UpperCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :List[str] = self._get_input_ids_and_config()
lowerCAmelCase__ :str = max_length
lowerCAmelCase__ :Dict = 2
lowerCAmelCase__ :Dict = 1
lowerCAmelCase__ :Optional[int] = 8
lowerCAmelCase__ :Optional[Any] = 9
for model_class in self.all_generative_model_classes:
lowerCAmelCase__ :List[Any] = model_class(__UpperCAmelCase )
lowerCAmelCase__ :int = model.generate(__UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCAmelCase )
lowerCAmelCase__ :Dict = jit(model.generate )
lowerCAmelCase__ :Dict = jit_generate(__UpperCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Optional[int] = self._get_input_ids_and_config()
# pad attention mask on the left
lowerCAmelCase__ :Tuple = attention_mask.at[(0, 0)].set(0 )
lowerCAmelCase__ :Union[str, Any] = False
lowerCAmelCase__ :Optional[Any] = max_length
for model_class in self.all_generative_model_classes:
lowerCAmelCase__ :Tuple = model_class(__UpperCAmelCase )
lowerCAmelCase__ :List[Any] = model.generate(__UpperCAmelCase , attention_mask=__UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCAmelCase )
lowerCAmelCase__ :int = jit(model.generate )
lowerCAmelCase__ :Optional[Any] = jit_generate(__UpperCAmelCase , attention_mask=__UpperCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :List[Any] = self._get_input_ids_and_config()
# pad attention mask on the left
lowerCAmelCase__ :Any = attention_mask.at[(0, 0)].set(0 )
lowerCAmelCase__ :Optional[Any] = True
lowerCAmelCase__ :Dict = max_length
for model_class in self.all_generative_model_classes:
lowerCAmelCase__ :str = model_class(__UpperCAmelCase )
lowerCAmelCase__ :Tuple = model.generate(__UpperCAmelCase , attention_mask=__UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCAmelCase )
lowerCAmelCase__ :Optional[int] = jit(model.generate )
lowerCAmelCase__ :Optional[int] = jit_generate(__UpperCAmelCase , attention_mask=__UpperCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :int = self._get_input_ids_and_config()
# pad attention mask on the left
lowerCAmelCase__ :int = attention_mask.at[(0, 0)].set(0 )
lowerCAmelCase__ :Dict = 2
lowerCAmelCase__ :Optional[Any] = max_length
for model_class in self.all_generative_model_classes:
lowerCAmelCase__ :Any = model_class(__UpperCAmelCase )
lowerCAmelCase__ :List[Any] = model.generate(__UpperCAmelCase , attention_mask=__UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCAmelCase )
lowerCAmelCase__ :Optional[Any] = jit(model.generate )
lowerCAmelCase__ :int = jit_generate(__UpperCAmelCase , attention_mask=__UpperCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Union[str, Any] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-bert' )
lowerCAmelCase__ :List[str] = FlaxAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-bert-flax-only' )
lowerCAmelCase__ :Optional[int] = 'Hello world'
lowerCAmelCase__ :Union[str, Any] = tokenizer(__UpperCAmelCase , return_tensors='np' ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(__UpperCAmelCase , 'do_samples' ):
model.generate(__UpperCAmelCase , do_samples=__UpperCAmelCase )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(__UpperCAmelCase , 'foo' ):
lowerCAmelCase__ :Optional[int] = {'foo': 'bar'}
model.generate(__UpperCAmelCase , **__UpperCAmelCase )
| 254 |
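# A minimal sketch of the property the tests above assert: jit-compiling
# `model.generate` with jax must reproduce the eager sequences. The model id
# and prompt are illustrative, and this assumes a transformers version whose
# Flax generation reads `config.do_sample`/`config.max_length`, as the tests do.
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = FlaxAutoModelForCausalLM.from_pretrained("gpt2")
model.config.do_sample = False
model.config.max_length = 10
input_ids = tokenizer("Hello world", return_tensors="np").input_ids
eager_sequences = model.generate(input_ids).sequences
jit_sequences = jit(model.generate)(input_ids).sequences
assert eager_sequences.tolist() == jit_sequences.tolist()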
"""simple docstring"""
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup


def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    """simple docstring"""
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }


def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    """simple docstring"""
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])
if __name__ == "__main__":
write_movies()
| 254 | 1 |
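# The scraping pattern used above, demonstrated on an inline HTML fragment so
# it runs without network access; the fragment mimics (but is not) real IMDb
# markup, and the title/rating values are illustrative.
from bs4 import BeautifulSoup

html = (
    '<td class="titleColumn"><a>Example Movie</a></td>'
    '<td class="ratingColumn imdbRating"><strong>9.2</strong></td>'
)
soup = BeautifulSoup(html, "html.parser")
titles = soup.find_all("td", attrs="titleColumn")
ratings = soup.find_all("td", class_="ratingColumn imdbRating")
print({t.a.text: float(r.strong.text) for t, r in zip(titles, ratings)})  # {'Example Movie': 9.2}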
'''simple docstring'''
def hex_to_bin(hex_num: str) -> int:
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function")
    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]
    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("Invalid value was passed to the function")
    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1
    return int(("-" + bin_str) if is_negative else bin_str)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 89 |
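# Quick checks of the converter above: 0xAC is 10101100 in binary and the
# result comes back as a plain int, with any leading minus sign preserved.
assert hex_to_bin("AC") == 10101100
assert hex_to_bin("-A") == -1010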
'''simple docstring'''
def triangle_number_generator():
    """simple docstring"""
    for n in range(1, 1000000):
        yield n * (n + 1) // 2


def count_divisors(n: int) -> int:
    """simple docstring"""
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count


def solution() -> int:
    """simple docstring"""
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)
if __name__ == "__main__":
print(solution())
| 89 | 1 |
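# Sanity check of the divisor counting above: 28 = 2^2 * 7 has
# (2 + 1) * (1 + 1) = 6 divisors (1, 2, 4, 7, 14, 28), and 28 is also the
# first triangle number with more than five divisors.
assert count_divisors(28) == 6
assert next(i for i in triangle_number_generator() if count_divisors(i) > 5) == 28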
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
URL = "http://www.mocksite.com/file1.txt"
CONTENT = '"text": ["foo", "foo"]'
HASH = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
class MockResponse:
    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self, **kwargs):
        return [bytes(CONTENT, "utf-8")]
def mock_request(*args, **kwargs):
    """simple docstring"""
    return MockResponse()
@pytest.mark.parametrize("""urls_type""", [str, list, dict] )
def test_download_manager(urls_type, tmp_path, monkeypatch):
    """simple docstring"""
    import requests

    monkeypatch.setattr(requests, "request", mock_request)
    url = URL
    if issubclass(urls_type, str):
        urls = url
    elif issubclass(urls_type, list):
        urls = [url]
    elif issubclass(urls_type, dict):
        urls = {"train": url}
    dataset_name = "dummy"
    cache_subdir = "downloads"
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root, cache_subdir), use_etag=False, )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    downloaded_paths = dl_manager.download(urls)
    input_urls = urls
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls, str):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls, dict):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths, input_urls):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path)
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix(".json")
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text())
            assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("""paths_type""", [str, list, dict] )
def test_download_manager_extract(paths_type, xz_file, text_file):
    """simple docstring"""
    filename = str(xz_file)
    if issubclass(paths_type, str):
        paths = filename
    elif issubclass(paths_type, list):
        paths = [filename]
    elif issubclass(paths_type, dict):
        paths = {"train": filename}
    dataset_name = "dummy"
    cache_dir = xz_file.parent
    extracted_subdir = "extracted"
    download_config = DownloadConfig(
        cache_dir=cache_dir, use_etag=False, )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    extracted_paths = dl_manager.extract(paths)
    input_paths = paths
    for extracted_paths in [extracted_paths]:
        if isinstance(paths, str):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths, dict):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths, input_paths):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path)
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path, etag=None)
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content
def _test_jsonl(path, file):
    """simple docstring"""
    assert path.endswith(".jsonl")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("utf-8"))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4
@pytest.mark.parametrize("""archive_jsonl""", ["""tar_jsonl_path""", """zip_jsonl_path"""] )
def test_iter_archive_path(archive_jsonl, request):
    """simple docstring"""
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    assert num_jsonl == 2
@pytest.mark.parametrize("""archive_nested_jsonl""", ["""tar_nested_jsonl_path""", """zip_nested_jsonl_path"""] )
def test_iter_archive_file(archive_nested_jsonl, request):
    """simple docstring"""
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2
def test_iter_files(data_dir_with_hidden_files):
    """simple docstring"""
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
| 138 |
def get_set_bits_count(number: int) -> int:
    """simple docstring"""
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")
    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 138 | 1 |
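# Cross-check of Kernighan's trick above: clearing the lowest set bit once per
# loop iteration gives the same answer as counting '1' characters in the
# binary representation.
for n in (0, 1, 25, 37, 2**20 - 1):
    assert get_set_bits_count(n) == bin(n).count("1")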
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
    SpeechT5HifiGan,
    SpeechT5HifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
[
"num_inference_steps",
"num_waveforms_per_prompt",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
] )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=(32, 64), class_embed_type="simple_projection", projection_class_embeddings_input_dim=32, class_embeddings_concat=True, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=1, out_channels=1, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, projection_dim=32, )
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77)
        vocoder_config = SpeechT5HifiGanConfig(
            model_in_dim=8, sampling_rate=16000, upsample_initial_channel=16, upsample_rates=[2, 2], upsample_kernel_sizes=[4, 4], resblock_kernel_sizes=[3, 7], resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]], normalize_before=False, )
        vocoder = SpeechT5HifiGan(vocoder_config)
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "vocoder": vocoder,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
        }
        return inputs
def __lowerCAmelCase ( self : List[str] ):
lowerCAmelCase__ : int = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ : Tuple = self.get_dummy_components()
lowerCAmelCase__ : Optional[Any] = AudioLDMPipeline(**lowercase_ )
lowerCAmelCase__ : Optional[int] = audioldm_pipe.to(lowercase_ )
audioldm_pipe.set_progress_bar_config(disable=lowercase_ )
lowerCAmelCase__ : List[Any] = self.get_dummy_inputs(lowercase_ )
lowerCAmelCase__ : List[str] = audioldm_pipe(**lowercase_ )
lowerCAmelCase__ : str = output.audios[0]
assert audio.ndim == 1
assert len(lowercase_ ) == 2_5_6
lowerCAmelCase__ : str = audio[:1_0]
lowerCAmelCase__ : str = np.array(
[-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def __lowerCAmelCase ( self : int ):
lowerCAmelCase__ : Optional[Any] = self.get_dummy_components()
lowerCAmelCase__ : Any = AudioLDMPipeline(**lowercase_ )
lowerCAmelCase__ : int = audioldm_pipe.to(lowercase_ )
lowerCAmelCase__ : Tuple = audioldm_pipe.to(lowercase_ )
audioldm_pipe.set_progress_bar_config(disable=lowercase_ )
lowerCAmelCase__ : Tuple = self.get_dummy_inputs(lowercase_ )
lowerCAmelCase__ : List[str] = 3 * [inputs['''prompt''']]
# forward
lowerCAmelCase__ : int = audioldm_pipe(**lowercase_ )
lowerCAmelCase__ : List[str] = output.audios[0]
lowerCAmelCase__ : Union[str, Any] = self.get_dummy_inputs(lowercase_ )
lowerCAmelCase__ : str = 3 * [inputs.pop('''prompt''' )]
lowerCAmelCase__ : List[Any] = audioldm_pipe.tokenizer(
lowercase_ ,padding='''max_length''' ,max_length=audioldm_pipe.tokenizer.model_max_length ,truncation=lowercase_ ,return_tensors='''pt''' ,)
lowerCAmelCase__ : Tuple = text_inputs['''input_ids'''].to(lowercase_ )
lowerCAmelCase__ : List[Any] = audioldm_pipe.text_encoder(
lowercase_ ,)
lowerCAmelCase__ : Tuple = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
lowerCAmelCase__ : Tuple = F.normalize(lowercase_ ,dim=-1 )
lowerCAmelCase__ : List[Any] = prompt_embeds
# forward
lowerCAmelCase__ : Union[str, Any] = audioldm_pipe(**lowercase_ )
lowerCAmelCase__ : Any = output.audios[0]
        assert np.abs(audio_1 - audio_2).max() < 1e-2
def __lowerCAmelCase ( self : str ):
lowerCAmelCase__ : Union[str, Any] = self.get_dummy_components()
lowerCAmelCase__ : List[Any] = AudioLDMPipeline(**lowercase_ )
lowerCAmelCase__ : List[str] = audioldm_pipe.to(lowercase_ )
lowerCAmelCase__ : Dict = audioldm_pipe.to(lowercase_ )
audioldm_pipe.set_progress_bar_config(disable=lowercase_ )
lowerCAmelCase__ : str = self.get_dummy_inputs(lowercase_ )
lowerCAmelCase__ : int = 3 * ['''this is a negative prompt''']
lowerCAmelCase__ : Any = negative_prompt
lowerCAmelCase__ : int = 3 * [inputs['''prompt''']]
# forward
lowerCAmelCase__ : Tuple = audioldm_pipe(**lowercase_ )
lowerCAmelCase__ : List[str] = output.audios[0]
lowerCAmelCase__ : Optional[Any] = self.get_dummy_inputs(lowercase_ )
lowerCAmelCase__ : Optional[int] = 3 * [inputs.pop('''prompt''' )]
lowerCAmelCase__ : Optional[Any] = []
for p in [prompt, negative_prompt]:
lowerCAmelCase__ : int = audioldm_pipe.tokenizer(
lowercase_ ,padding='''max_length''' ,max_length=audioldm_pipe.tokenizer.model_max_length ,truncation=lowercase_ ,return_tensors='''pt''' ,)
lowerCAmelCase__ : Optional[Any] = text_inputs['''input_ids'''].to(lowercase_ )
lowerCAmelCase__ : Dict = audioldm_pipe.text_encoder(
lowercase_ ,)
lowerCAmelCase__ : Dict = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
lowerCAmelCase__ : Any = F.normalize(lowercase_ ,dim=-1 )
embeds.append(lowercase_ )
lowerCAmelCase__ ,lowerCAmelCase__ : int = embeds
# forward
lowerCAmelCase__ : List[str] = audioldm_pipe(**lowercase_ )
lowerCAmelCase__ : Optional[int] = output.audios[0]
        assert np.abs(audio_1 - audio_2).max() < 1e-2
def __lowerCAmelCase ( self : Optional[Any] ):
lowerCAmelCase__ : Dict = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ : Optional[int] = self.get_dummy_components()
lowerCAmelCase__ : Union[str, Any] = PNDMScheduler(skip_prk_steps=lowercase_ )
lowerCAmelCase__ : Optional[int] = AudioLDMPipeline(**lowercase_ )
lowerCAmelCase__ : List[str] = audioldm_pipe.to(lowercase_ )
audioldm_pipe.set_progress_bar_config(disable=lowercase_ )
lowerCAmelCase__ : Dict = self.get_dummy_inputs(lowercase_ )
lowerCAmelCase__ : Union[str, Any] = '''egg cracking'''
lowerCAmelCase__ : Tuple = audioldm_pipe(**lowercase_ ,negative_prompt=lowercase_ )
lowerCAmelCase__ : List[Any] = output.audios[0]
assert audio.ndim == 1
assert len(lowercase_ ) == 2_5_6
lowerCAmelCase__ : Any = audio[:1_0]
lowerCAmelCase__ : List[Any] = np.array(
[-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def __lowerCAmelCase ( self : List[Any] ):
lowerCAmelCase__ : str = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ : Tuple = self.get_dummy_components()
lowerCAmelCase__ : int = PNDMScheduler(skip_prk_steps=lowercase_ )
lowerCAmelCase__ : Union[str, Any] = AudioLDMPipeline(**lowercase_ )
lowerCAmelCase__ : Optional[int] = audioldm_pipe.to(lowercase_ )
audioldm_pipe.set_progress_bar_config(disable=lowercase_ )
lowerCAmelCase__ : Optional[int] = '''A hammer hitting a wooden surface'''
# test num_waveforms_per_prompt=1 (default)
lowerCAmelCase__ : Dict = audioldm_pipe(lowercase_ ,num_inference_steps=2 ).audios
assert audios.shape == (1, 2_5_6)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
lowerCAmelCase__ : List[Any] = 2
lowerCAmelCase__ : Union[str, Any] = audioldm_pipe([prompt] * batch_size ,num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 2_5_6)
# test num_waveforms_per_prompt for single prompt
lowerCAmelCase__ : Dict = 2
lowerCAmelCase__ : int = audioldm_pipe(lowercase_ ,num_inference_steps=2 ,num_waveforms_per_prompt=lowercase_ ).audios
assert audios.shape == (num_waveforms_per_prompt, 2_5_6)
# test num_waveforms_per_prompt for batch of prompts
lowerCAmelCase__ : Optional[int] = 2
lowerCAmelCase__ : List[Any] = audioldm_pipe(
[prompt] * batch_size ,num_inference_steps=2 ,num_waveforms_per_prompt=lowercase_ ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 2_5_6)
def __lowerCAmelCase ( self : Optional[Any] ):
lowerCAmelCase__ : Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ : Optional[int] = self.get_dummy_components()
lowerCAmelCase__ : Optional[Any] = AudioLDMPipeline(**lowercase_ )
lowerCAmelCase__ : List[Any] = audioldm_pipe.to(lowercase_ )
audioldm_pipe.set_progress_bar_config(disable=lowercase_ )
lowerCAmelCase__ : Any = audioldm_pipe.vocoder.config.sampling_rate
lowerCAmelCase__ : Any = self.get_dummy_inputs(lowercase_ )
lowerCAmelCase__ : Dict = audioldm_pipe(audio_length_in_s=0.016 ,**lowercase_ )
lowerCAmelCase__ : Union[str, Any] = output.audios[0]
assert audio.ndim == 1
assert len(lowercase_ ) / vocoder_sampling_rate == 0.016
lowerCAmelCase__ : Union[str, Any] = audioldm_pipe(audio_length_in_s=0.032 ,**lowercase_ )
lowerCAmelCase__ : Optional[int] = output.audios[0]
assert audio.ndim == 1
assert len(lowercase_ ) / vocoder_sampling_rate == 0.032
def __lowerCAmelCase ( self : List[Any] ):
lowerCAmelCase__ : int = self.get_dummy_components()
lowerCAmelCase__ : Tuple = AudioLDMPipeline(**lowercase_ )
lowerCAmelCase__ : Optional[int] = audioldm_pipe.to(lowercase_ )
audioldm_pipe.set_progress_bar_config(disable=lowercase_ )
lowerCAmelCase__ : str = ['''hey''']
lowerCAmelCase__ : str = audioldm_pipe(lowercase_ ,num_inference_steps=1 )
lowerCAmelCase__ : str = output.audios.shape
assert audio_shape == (1, 2_5_6)
lowerCAmelCase__ : Tuple = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
        audioldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device)
lowerCAmelCase__ : str = audioldm_pipe(lowercase_ ,num_inference_steps=1 )
lowerCAmelCase__ : Dict = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 2_5_6)
def __lowerCAmelCase ( self : Any ):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=lowercase_ )
def __lowerCAmelCase ( self : Dict ):
self._test_inference_batch_single_identical(test_mean_pixel_difference=lowercase_ )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() ,reason='''XFormers attention is only available with CUDA and `xformers` installed''' ,)
def __lowerCAmelCase ( self : Optional[Any] ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowercase_ )
@slow
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
    def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 2.5,
        }
        return inputs
def __lowerCAmelCase ( self : Dict ):
lowerCAmelCase__ : Union[str, Any] = AudioLDMPipeline.from_pretrained('''cvssp/audioldm''' )
lowerCAmelCase__ : List[Any] = audioldm_pipe.to(lowercase_ )
audioldm_pipe.set_progress_bar_config(disable=lowercase_ )
lowerCAmelCase__ : Union[str, Any] = self.get_inputs(lowercase_ )
lowerCAmelCase__ : Union[str, Any] = 2_5
lowerCAmelCase__ : Optional[Any] = audioldm_pipe(**lowercase_ ).audios[0]
assert audio.ndim == 1
assert len(lowercase_ ) == 8_1_9_2_0
lowerCAmelCase__ : Optional[Any] = audio[7_7_2_3_0:7_7_2_4_0]
lowerCAmelCase__ : Optional[Any] = np.array(
[-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] )
lowerCAmelCase__ : List[str] = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1E-2
def __lowerCAmelCase ( self : Tuple ):
lowerCAmelCase__ : Optional[int] = AudioLDMPipeline.from_pretrained('''cvssp/audioldm''' )
lowerCAmelCase__ : Any = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
lowerCAmelCase__ : str = audioldm_pipe.to(lowercase_ )
audioldm_pipe.set_progress_bar_config(disable=lowercase_ )
lowerCAmelCase__ : Optional[Any] = self.get_inputs(lowercase_ )
lowerCAmelCase__ : Union[str, Any] = audioldm_pipe(**lowercase_ ).audios[0]
assert audio.ndim == 1
assert len(lowercase_ ) == 8_1_9_2_0
lowerCAmelCase__ : List[Any] = audio[2_7_7_8_0:2_7_7_9_0]
lowerCAmelCase__ : Any = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212] )
lowerCAmelCase__ : Tuple = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3E-2
| 106 |
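# A minimal sketch of running the pipeline exercised above; the model id
# 'cvssp/audioldm' comes from the slow tests, while the prompt, step count and
# audio length here are illustrative. Requires the weights to be downloadable.
import torch
from diffusers import AudioLDMPipeline

pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
audio = pipe("A hammer hitting a wooden surface", num_inference_steps=10, audio_length_in_s=5.0).audios[0]
# `audio` is a 1-D numpy waveform at pipe.vocoder.config.sampling_rate (16 kHz in the tests above).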
"""simple docstring"""
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'''
)
parser.add_argument(
'''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.'''
)
parser.add_argument(
'''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.'''
)
parser.add_argument('''--vocab_size''', default=3_0_5_2_2, type=int)
    args = parser.parse_args()
logger.info(F'''Loading data from {args.data_file}''')
with open(args.data_file, '''rb''') as fp:
        data = pickle.load(fp)
logger.info('''Counting occurrences for MLM.''')
    counter = Counter()
for tk_ids in data:
counter.update(tk_ids)
    counts = [0] * args.vocab_size
for k, v in counter.items():
        counts[k] = v
logger.info(F'''Dump to {args.token_counts_dump}''')
with open(args.token_counts_dump, '''wb''') as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 106 | 1 |
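# The counting step above in miniature: Counter.update over each sequence of
# token ids, then the totals scattered into a dense list indexed by token id.
# The toy ids and vocab size are illustrative.
from collections import Counter

data = [[5, 7, 7], [7, 9]]
counter = Counter()
for tk_ids in data:
    counter.update(tk_ids)
counts = [0] * 10
for k, v in counter.items():
    counts[k] = v
print(counts)  # [0, 0, 0, 0, 0, 1, 0, 3, 0, 1]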
"""simple docstring"""
import operator as op
def solve(post_fix):
    """simple docstring"""
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x in digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")
            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")
            stack.append(
                str(opr[x](int(a), int(b))))  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8), ("push(" + a + x + b + ")").ljust(12), ",".join(stack), sep=" | ", )
    return int(stack[0])


if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", solve(Postfix))
| 352 |
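# Tracing the evaluator above on "2 3 4 * +": the operands are pushed, "*"
# pops 4 and 3 and pushes 12, then "+" pops 12 and 2 and pushes the result 14.
assert solve("2 3 4 * +".split(" ")) == 14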
"""simple docstring"""
import numpy as np
def power_iteration(
    input_matrix,
    vector,
    error_tol=1e-12,
    max_iterations=100,
):
    """simple docstring"""
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)
    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12
    while not convergence:
        # Multiple matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))
        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_
    if is_complex:
        lambda_ = np.real(lambda_)
    return lambda_, vector


def test_power_iteration():
    """simple docstring"""
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)
    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector
        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)
        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]
        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 205 | 0 |
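# A tiny demonstration of power_iteration above: the dominant eigenpair of
# [[2, 1], [1, 2]] is eigenvalue 3 with eigenvector [1, 1] / sqrt(2).
import numpy as np

value, vector = power_iteration(np.array([[2.0, 1.0], [1.0, 2.0]]), np.array([1.0, 0.0]))
print(round(float(value), 6), np.round(np.abs(vector), 6))  # 3.0 [0.707107 0.707107]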
def jaro_winkler(str1: str, str2: str) -> float:
    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break
    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("""hello""", """world"""))
| 59 |
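# Worked values for the metric above: identical strings score 1.0, and the
# common-prefix bonus lifts the classic near-match "martha"/"marhta"
# (Jaro 0.9444...) to roughly 0.9611.
assert jaro_winkler("hello", "hello") == 1.0
assert round(jaro_winkler("martha", "marhta"), 4) == 0.9611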
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio
def ffmpeg_microphone(
    sampling_rate: int,
    chunk_length_s: float,
    format_for_conversion: str = "f32le",
):
    ar = f"{sampling_rate}"
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")
    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"
    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item
def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s
    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")
    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]
    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}")
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
def _ffmpeg_stream(ffmpeg_command, buflen: int):
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
| 212 | 0 |
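# A minimal sketch of decoding a file with `ffmpeg_read` above; the filename
# is an illustrative placeholder and ffmpeg must be available on PATH.
with open("sample.wav", "rb") as f:
    audio = ffmpeg_read(f.read(), sampling_rate=16000)
print(audio.shape, audio.dtype)  # (num_samples,) float32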
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--albert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained ALBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
| 180 |
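# Calling the converter above directly instead of through argparse; the
# checkpoint, config and output paths are illustrative placeholders.
convert_tf_checkpoint_to_pytorch(
    tf_checkpoint_path="./albert_base/model.ckpt-best",
    albert_config_file="./albert_base/albert_config.json",
    pytorch_dump_path="./albert_base/pytorch_model.bin",
)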
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
| 180 | 1 |
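# The factorisation above in action: 360 = 2^3 * 3^2 * 5, with each prime
# repeated once per unit of multiplicity.
assert prime_factors(360) == [2, 2, 2, 3, 3, 5]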
'''simple docstring'''
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because lightning requires to do so
    def forward(self):
        pass
def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model, longformer_question_answering_ckpt_path, pytorch_dump_folder_path
):
    """simple docstring"""
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)
    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])
    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)
    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()
    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)
    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--longformer_model',
default=None,
type=str,
required=True,
help='model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.',
)
parser.add_argument(
'--longformer_question_answering_ckpt_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch Lightning Checkpoint.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 80 |
'''simple docstring'''
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class lowercase_ ( AbstractDatasetInputStream ):
def __init__( self , a , a , a = None , a = None , a = False , **a , ):
super().__init__(features=a , cache_dir=a , keep_in_memory=a , **a )
        self.builder = Sql(
cache_dir=a , features=a , sql=a , con=a , **a , )
    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None
self.builder.download_and_prepare(
download_config=a , download_mode=a , verification_mode=a , base_path=a , )
# Build dataset for splits
        dataset = self.builder.as_dataset(
split="train" , verification_mode=a , in_memory=self.keep_in_memory )
return dataset
class lowercase_ :
def __init__( self , a , a , a , a = None , a = None , **a , ):
if num_proc is not None and num_proc <= 0:
raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''' )
        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs
    def write(self):
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)
        written = self._write(index=index, **self.to_sql_kwargs)
return written
    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data, key=slice(offset, offset + self.batch_size), indices=self.dataset._indices, )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)
    def _write(self, index, **to_sql_kwargs):
        written = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
            num_rows, batch_size = len(self.dataset), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , a , a )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ):
written += num_rows
return written
| 80 | 1 |
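# A sketch of driving the writer above through Dataset.to_sql with sqlite3;
# the table name and in-memory connection are illustrative, and this assumes a
# datasets version that exposes Dataset.to_sql.
import sqlite3
from datasets import Dataset

ds = Dataset.from_dict({"text": ["foo", "bar"], "label": [0, 1]})
con = sqlite3.connect(":memory:")
ds.to_sql("my_table", con)
print(con.execute("SELECT COUNT(*) FROM my_table").fetchone())  # (2,)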
"""simple docstring"""
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
| 2 |
"""simple docstring"""
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__A = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase (TokenizerTesterMixin ,unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase :Dict = DebertaVaTokenizer
_UpperCAmelCase :Tuple = DebertaVaTokenizerFast
_UpperCAmelCase :int = True
_UpperCAmelCase :int = True
def _snake_case ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
lowercase__: List[Any] = DebertaVaTokenizer(_UpperCAmelCase , unk_token='''<unk>''' )
tokenizer.save_pretrained(self.tmpdirname )
def _snake_case ( self , _UpperCAmelCase ):
lowercase__: List[str] = '''this is a test'''
lowercase__: int = '''this is a test'''
return input_text, output_text
def _snake_case ( self ):
lowercase__: Optional[int] = '''<pad>'''
lowercase__: Optional[int] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase )
def _snake_case ( self ):
lowercase__: Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''<unk>''' )
self.assertEqual(vocab_keys[-1] , '''[PAD]''' )
self.assertEqual(len(_UpperCAmelCase ) , 30001 )
def _snake_case ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 30000 )
def _snake_case ( self ):
# fmt: off
lowercase__: int = ''' \tHeLLo!how \n Are yoU? '''
lowercase__: List[str] = ['''▁hello''', '''!''', '''how''', '''▁are''', '''▁you''', '''?''']
# fmt: on
lowercase__: Any = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase )
lowercase__: Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Optional[Any] = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase )
lowercase__: Optional[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def _snake_case ( self ):
pass
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def _snake_case ( self ):
pass
def _snake_case ( self ):
# fmt: off
lowercase__: Dict = '''I was born in 92000, and this is falsé.'''
lowercase__: str = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
lowercase__: Union[str, Any] = DebertaVaTokenizer(_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: str = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Tuple = DebertaVaTokenizerFast(_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: Union[str, Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
# fmt: off
lowercase__: Any = '''I was born in 92000, and this is falsé.'''
lowercase__: str = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
lowercase__: Optional[int] = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: List[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Optional[int] = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
# fmt: off
lowercase__: List[str] = '''I was born in 92000, and this is falsé.'''
lowercase__: List[str] = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
lowercase__: Union[str, Any] = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: int = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
# fmt: off
lowercase__: Union[str, Any] = '''I was born in 92000, and this is falsé.'''
lowercase__: int = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
lowercase__: Optional[int] = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: Dict = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Union[str, Any] = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
# fmt: off
lowercase__: Optional[int] = ''' \tHeLLo!how \n Are yoU? '''
lowercase__: str = ['''▁''', '''<unk>''', '''e''', '''<unk>''', '''o''', '''!''', '''how''', '''▁''', '''<unk>''', '''re''', '''▁yo''', '''<unk>''', '''?''']
# fmt: on
lowercase__: Dict = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Optional[Any] = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: List[str] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
lowercase__: int = self.get_tokenizer()
lowercase__: List[Any] = self.get_rust_tokenizer()
lowercase__: List[str] = '''I was born in 92000, and this is falsé.'''
lowercase__: Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
lowercase__: List[str] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Dict = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
lowercase__: Tuple = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Any = self.get_rust_tokenizer()
lowercase__: str = tokenizer.encode(_UpperCAmelCase )
lowercase__: Any = rust_tokenizer.encode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
lowercase__: Optional[Any] = '''This is a test'''
lowercase__: str = [13, 1, 4398, 25, 21, 1289]
lowercase__: List[Any] = ['''▁''', '''T''', '''his''', '''▁is''', '''▁a''', '''▁test''']
lowercase__: Any = ['''▁''', '''<unk>''', '''his''', '''▁is''', '''▁a''', '''▁test''']
lowercase__: int = DebertaVaTokenizer(_UpperCAmelCase , keep_accents=_UpperCAmelCase )
lowercase__: int = DebertaVaTokenizerFast(_UpperCAmelCase , keep_accents=_UpperCAmelCase )
lowercase__: Any = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: str = tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Any = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Union[str, Any] = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: List[Any] = rust_tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: str = rust_tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
# fmt: off
lowercase__: str = '''I was born in 92000, and this is falsé.'''
lowercase__: Dict = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
lowercase__: Tuple = ['''▁''', '''I''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.''', ]
lowercase__: Dict = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
lowercase__: Optional[Any] = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Dict = tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Optional[Any] = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: List[Any] = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Dict = rust_tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Optional[Any] = rust_tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
lowercase__: Optional[int] = DebertaVaTokenizer(_UpperCAmelCase )
lowercase__: Optional[int] = tokenizer.encode('''sequence builders''' )
lowercase__: Optional[Any] = tokenizer.encode('''multi-sequence build''' )
lowercase__: Union[str, Any] = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase )
lowercase__: Dict = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , _UpperCAmelCase )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , _UpperCAmelCase , )
@slow
def _snake_case ( self ):
# fmt: off
lowercase__: List[Any] = {'''input_ids''': [[1, 39867, 36, 19390, 486, 27, 35052, 81436, 18, 60685, 1225, 7, 35052, 81436, 18, 9367, 16899, 18, 15937, 53, 594, 773, 18, 16287, 30465, 36, 15937, 6, 41139, 38, 36979, 60763, 191, 6, 34132, 99, 6, 50538, 390, 43230, 6, 34132, 2779, 20850, 14, 699, 1072, 1194, 36, 382, 10901, 53, 7, 699, 1072, 2084, 36, 20422, 630, 53, 19, 105, 3049, 1896, 1053, 16899, 1506, 11, 37978, 4243, 7, 1237, 31869, 200, 16566, 654, 6, 35052, 81436, 7, 55630, 13593, 4, 2], [1, 26, 15011, 13, 667, 8, 1053, 18, 23611, 1237, 72356, 12820, 34, 104134, 1209, 35, 13313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 15785, 14951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase , model_name='''microsoft/deberta-v2-xlarge''' , revision='''ad6e42c1532ddf3a15c39246b63f5559d558b670''' , )
| 2 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
torch.backends.cuda.matmul.allow_tf32 = False
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Optional[int] ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCAmelCase_ ( self : str ) -> Tuple:
return 12
@property
def UpperCAmelCase_ ( self : Tuple ) -> Dict:
return 12
@property
def UpperCAmelCase_ ( self : List[Any] ) -> str:
return 32
@property
def UpperCAmelCase_ ( self : Tuple ) -> Optional[int]:
torch.manual_seed(0 )
UpperCAmelCase : Optional[int] = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def UpperCAmelCase_ ( self : List[str] ) -> Tuple:
UpperCAmelCase : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[int]:
torch.manual_seed(0 )
UpperCAmelCase : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(lowercase_ )
@property
def UpperCAmelCase_ ( self : Union[str, Any] ) -> str:
torch.manual_seed(0 )
UpperCAmelCase : Any = 12
UpperCAmelCase : Optional[int] = 12
UpperCAmelCase : Tuple = {
'attention_bias': True,
'cross_attention_dim': 32,
'attention_head_dim': height * width,
'num_attention_heads': 1,
'num_vector_embeds': self.num_embed,
'num_embeds_ada_norm': self.num_embeds_ada_norm,
'norm_num_groups': 32,
'sample_size': width,
'activation_fn': 'geglu-approximate',
}
        UpperCAmelCase : Optional[Any] = Transformer2DModel(**lowercase_ )
return model
def UpperCAmelCase_ ( self : Optional[Any] ) -> List[Any]:
UpperCAmelCase : Any = 'cpu'
UpperCAmelCase : Optional[int] = self.dummy_vqvae
UpperCAmelCase : List[str] = self.dummy_text_encoder
UpperCAmelCase : str = self.dummy_tokenizer
UpperCAmelCase : Dict = self.dummy_transformer
UpperCAmelCase : Union[str, Any] = VQDiffusionScheduler(self.num_embed )
UpperCAmelCase : str = LearnedClassifierFreeSamplingEmbeddings(learnable=lowercase_ )
UpperCAmelCase : Optional[Any] = VQDiffusionPipeline(
vqvae=lowercase_ , text_encoder=lowercase_ , tokenizer=lowercase_ , transformer=lowercase_ , scheduler=lowercase_ , learned_classifier_free_sampling_embeddings=lowercase_ , )
UpperCAmelCase : Optional[int] = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase : Any = 'teddy bear playing in the pool'
UpperCAmelCase : List[str] = torch.Generator(device=lowercase_ ).manual_seed(0 )
UpperCAmelCase : List[str] = pipe([prompt] , generator=lowercase_ , num_inference_steps=2 , output_type='np' )
UpperCAmelCase : List[str] = output.images
UpperCAmelCase : List[Any] = torch.Generator(device=lowercase_ ).manual_seed(0 )
UpperCAmelCase : Union[str, Any] = pipe(
[prompt] , generator=lowercase_ , output_type='np' , return_dict=lowercase_ , num_inference_steps=2 )[0]
UpperCAmelCase : List[Any] = image[0, -3:, -3:, -1]
UpperCAmelCase : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
UpperCAmelCase : Optional[Any] = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase_ ( self : Any ) -> Dict:
UpperCAmelCase : str = 'cpu'
UpperCAmelCase : List[str] = self.dummy_vqvae
UpperCAmelCase : Tuple = self.dummy_text_encoder
UpperCAmelCase : Dict = self.dummy_tokenizer
UpperCAmelCase : int = self.dummy_transformer
UpperCAmelCase : Optional[int] = VQDiffusionScheduler(self.num_embed )
UpperCAmelCase : List[str] = LearnedClassifierFreeSamplingEmbeddings(
learnable=lowercase_ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
UpperCAmelCase : List[Any] = VQDiffusionPipeline(
vqvae=lowercase_ , text_encoder=lowercase_ , tokenizer=lowercase_ , transformer=lowercase_ , scheduler=lowercase_ , learned_classifier_free_sampling_embeddings=lowercase_ , )
UpperCAmelCase : Optional[int] = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase : Union[str, Any] = 'teddy bear playing in the pool'
UpperCAmelCase : Union[str, Any] = torch.Generator(device=lowercase_ ).manual_seed(0 )
UpperCAmelCase : Tuple = pipe([prompt] , generator=lowercase_ , num_inference_steps=2 , output_type='np' )
UpperCAmelCase : List[Any] = output.images
UpperCAmelCase : Tuple = torch.Generator(device=lowercase_ ).manual_seed(0 )
UpperCAmelCase : int = pipe(
[prompt] , generator=lowercase_ , output_type='np' , return_dict=lowercase_ , num_inference_steps=2 )[0]
UpperCAmelCase : Tuple = image[0, -3:, -3:, -1]
UpperCAmelCase : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
UpperCAmelCase : Optional[Any] = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : int ) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self : List[str] ) -> Optional[Any]:
UpperCAmelCase : List[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy' )
UpperCAmelCase : Tuple = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq' )
UpperCAmelCase : str = pipeline.to(lowercase_ )
pipeline.set_progress_bar_config(disable=lowercase_ )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
UpperCAmelCase : Any = torch.Generator(device=lowercase_ ).manual_seed(0 )
UpperCAmelCase : Any = pipeline(
'teddy bear playing in the pool' , num_images_per_prompt=1 , generator=lowercase_ , output_type='np' , )
UpperCAmelCase : Dict = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 151 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowercase__ = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ["YolosFeatureExtractor"]
lowercase__ = ["YolosImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_yolos"] = [
        "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "YolosForObjectDetection",
        "YolosModel",
        "YolosPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
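# A minimal sketch (not part of this module) of what the lazy-module pattern buys;
# the names below assume the standard transformers package layout:
#
#     import transformers.models.yolos as yolos       # cheap: torch is not imported yet
#     detector_cls = yolos.YolosForObjectDetection    # first attribute access triggers the real import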
| 151 | 1 |
'''simple docstring'''
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
"text_branch": "text_model",
"audio_branch": "audio_model.audio_encoder",
"attn": "attention.self",
"self.proj": "output.dense",
"attention.self_mask": "attn_mask",
"mlp.fc1": "intermediate.dense",
"mlp.fc2": "output.dense",
"norm1": "layernorm_before",
"norm2": "layernorm_after",
"bn0": "batch_norm",
}
_SCREAMING_SNAKE_CASE : int = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def init_clap( checkpoint_path : str , enable_fusion : bool = False ):
    '''simple docstring'''
    model , model_cfg = create_model(
        "HTSAT-tiny" , "roberta" , checkpoint_path , precision="fp32" , device="cuda:0" if torch.cuda.is_available() else "cpu" , enable_fusion=enable_fusion , fusion_type="aff_2d" if enable_fusion else None , )
    return model, model_cfg
def rename_state_dict( state_dict : dict ):
    '''simple docstring'''
    model_state_dict = {}
    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify , new_key )
        if re.match(sequential_layers_pattern , key ):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern , key ).group(1 )
            key = key.replace(f'sequential.{sequential_layer}.' , f'layers.{int(sequential_layer )//3}.linear.' )
        elif re.match(text_projection_pattern , key ):
            projecton_layer = int(re.match(text_projection_pattern , key ).group(1 ) )
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f'_projection.{projecton_layer}.' , f'_projection.linear{transformers_projection_layer}.' )
        if "audio" in key and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0 ) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace("qkv" , "query" )] = query_layer
            model_state_dict[key.replace("qkv" , "key" )] = key_layer
            model_state_dict[key.replace("qkv" , "value" )] = value_layer
        else:
            model_state_dict[key] = value
    return model_state_dict
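# Illustrative mapping (hypothetical keys, not read from a real checkpoint): because
# CLAP wraps its projection heads in nn.Sequential, "audio_projection.0.weight"
# is renamed to "audio_projection.linear1.weight" and "audio_projection.2.weight"
# to "audio_projection.linear2.weight"; fused audio qkv weights are additionally
# split into equal query/key/value thirds along dim 0.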
def convert_clap_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path , enable_fusion=False ):
    '''simple docstring'''
    clap_model , clap_model_cfg = init_clap(checkpoint_path , enable_fusion=enable_fusion )
    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict )
    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config )
    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict , strict=False )
    model.save_pretrained(pytorch_dump_folder_path )
    transformers_config.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
    args = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 353 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : int = {
"alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}
class _snake_case ( lowercase_ ):
lowerCAmelCase_ : Union[str, Any] = "mgp-str"
def __init__( self , a__=[32, 128] , a__=4 , a__=3 , a__=27 , a__=38 , a__=50_257 , a__=30_522 , a__=768 , a__=12 , a__=12 , a__=4.0 , a__=True , a__=False , a__=1e-5 , a__=0.0 , a__=0.0 , a__=0.0 , a__=False , a__=0.0_2 , **a__ , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**a__ )
snake_case_ = image_size
snake_case_ = patch_size
snake_case_ = num_channels
snake_case_ = max_token_length
snake_case_ = num_character_labels
snake_case_ = num_bpe_labels
snake_case_ = num_wordpiece_labels
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = mlp_ratio
snake_case_ = distilled
snake_case_ = layer_norm_eps
snake_case_ = drop_rate
snake_case_ = qkv_bias
snake_case_ = attn_drop_rate
snake_case_ = drop_path_rate
snake_case_ = output_aa_attentions
snake_case_ = initializer_range
| 92 | 0 |
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
logger = logging.getLogger(__name__)
def accuracy(out , labels ):
    '''simple docstring'''
    outputs = np.argmax(out , axis=1 )
    return np.sum(outputs == labels )
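# Quick illustrative check (not part of the original script):
#
#     >>> accuracy(np.array([[0.9, 0.1], [0.2, 0.8]]), np.array([0, 1]))
#     2
#
# argmax over axis 1 picks the predicted class per row; the sum counts correct rows.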
def load_rocstories_dataset(dataset_path ):
    '''simple docstring'''
    with open(dataset_path, encoding="""utf_8""" ) as f:
        reader = csv.reader(f )
        output = []
        next(reader )  # skip the first line
        for line in tqdm(reader ):
            output.append((""" """.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
    return output
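# Shape of one parsed row (hypothetical values, for illustration): the four story
# sentences are joined, the two candidate endings follow, and the CSV's 1-indexed
# gold label becomes 0-indexed:
#
#     ("Sent1 Sent2 Sent3 Sent4", "ending one", "ending two", 0)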
def pre_process_datasets(encoded_datasets , input_len , cap_length , start_token , delimiter_token , clf_token ):
    '''simple docstring'''
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset )
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64 )
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64 )
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64 )
        mc_labels = np.zeros((n_batch,), dtype=np.int64 )
        for i, (story, cont1, cont2, mc_label) in enumerate(dataset ):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1 )] = with_cont1
            input_ids[i, 1, : len(with_cont2 )] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1 ) - 1
            mc_token_ids[i, 1] = len(with_cont2 ) - 1
            lm_labels[i, 0, : len(with_cont1 )] = with_cont1
            lm_labels[i, 1, : len(with_cont2 )] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t ) for t in all_inputs ) )
    return tensor_datasets
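# Resulting tensor layout per dataset (as built above):
#     input_ids    (n_batch, 2, input_len)  -- story + either continuation
#     mc_token_ids (n_batch, 2)             -- position of each [clf] token
#     lm_labels    (n_batch, 2, input_len)  -- -100 wherever a position is padding
#     mc_labels    (n_batch,)               -- index (0 or 1) of the correct continuation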
def main():
'''simple docstring'''
__magic_name__ = argparse.ArgumentParser()
parser.add_argument("""--model_name""", type=A_, default="""openai-gpt""", help="""pretrained model name""" )
parser.add_argument("""--do_train""", action="""store_true""", help="""Whether to run training.""" )
parser.add_argument("""--do_eval""", action="""store_true""", help="""Whether to run eval on the dev set.""" )
parser.add_argument(
"""--output_dir""", default=A_, type=A_, required=A_, help="""The output directory where the model predictions and checkpoints will be written.""", )
parser.add_argument("""--train_dataset""", type=A_, default="""""" )
parser.add_argument("""--eval_dataset""", type=A_, default="""""" )
parser.add_argument("""--seed""", type=A_, default=42 )
parser.add_argument("""--num_train_epochs""", type=A_, default=3 )
parser.add_argument("""--train_batch_size""", type=A_, default=8 )
parser.add_argument("""--eval_batch_size""", type=A_, default=16 )
parser.add_argument("""--adam_epsilon""", default=1e-8, type=A_, help="""Epsilon for Adam optimizer.""" )
parser.add_argument("""--max_grad_norm""", type=A_, default=1 )
parser.add_argument(
"""--max_steps""", default=-1, type=A_, help=(
"""If > 0: set total number of training steps to perform. Override num_train_epochs."""
), )
parser.add_argument(
"""--gradient_accumulation_steps""", type=A_, default=1, help="""Number of updates steps to accumulate before performing a backward/update pass.""", )
parser.add_argument("""--learning_rate""", type=A_, default=6.25e-5 )
parser.add_argument("""--warmup_steps""", default=0, type=A_, help="""Linear warmup over warmup_steps.""" )
parser.add_argument("""--lr_schedule""", type=A_, default="""warmup_linear""" )
parser.add_argument("""--weight_decay""", type=A_, default=0.01 )
parser.add_argument("""--lm_coef""", type=A_, default=0.9 )
parser.add_argument("""--n_valid""", type=A_, default=374 )
parser.add_argument("""--server_ip""", type=A_, default="""""", help="""Can be used for distant debugging.""" )
parser.add_argument("""--server_port""", type=A_, default="""""", help="""Can be used for distant debugging.""" )
    args = parser.parse_args()
    print(args )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("""Waiting for debugger attach""" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=A_ )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
__magic_name__ = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
__magic_name__ = torch.cuda.device_count()
logger.info("""device: {}, n_gpu {}""".format(A_, A_ ) )
if not args.do_train and not args.do_eval:
raise ValueError("""At least one of `do_train` or `do_eval` must be True.""" )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
__magic_name__ = ["""_start_""", """_delimiter_""", """_classify_"""]
__magic_name__ = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(A_ )
__magic_name__ = tokenizer.convert_tokens_to_ids(A_ )
__magic_name__ = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(A_ ) )
model.to(A_ )
# Load and encode the datasets
    def tokenize_and_encode(obj ):
        if isinstance(obj , str ):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj ) )
        elif isinstance(obj , int ):
            return obj
        return [tokenize_and_encode(o ) for o in obj]
logger.info("""Encoding dataset...""" )
__magic_name__ = load_rocstories_dataset(args.train_dataset )
__magic_name__ = load_rocstories_dataset(args.eval_dataset )
__magic_name__ = (train_dataset, eval_dataset)
__magic_name__ = tokenize_and_encode(A_ )
# Compute the max input length for the Transformer
__magic_name__ = model.config.n_positions // 2 - 2
    __magic_name__ = max(
        len(story[:max_length] ) + max(len(cont1[:max_length] ), len(cont2[:max_length] ) ) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset )
__magic_name__ = min(A_, model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
__magic_name__ = pre_process_datasets(A_, A_, A_, *A_ )
__magic_name__ , __magic_name__ = tensor_datasets[0], tensor_datasets[1]
__magic_name__ = TensorDataset(*A_ )
__magic_name__ = RandomSampler(A_ )
__magic_name__ = DataLoader(A_, sampler=A_, batch_size=args.train_batch_size )
__magic_name__ = TensorDataset(*A_ )
__magic_name__ = SequentialSampler(A_ )
__magic_name__ = DataLoader(A_, sampler=A_, batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
__magic_name__ = args.max_steps
__magic_name__ = args.max_steps // (len(A_ ) // args.gradient_accumulation_steps) + 1
else:
__magic_name__ = len(A_ ) // args.gradient_accumulation_steps * args.num_train_epochs
__magic_name__ = list(model.named_parameters() )
__magic_name__ = ["""bias""", """LayerNorm.bias""", """LayerNorm.weight"""]
__magic_name__ = [
{
"""params""": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
"""weight_decay""": args.weight_decay,
},
{"""params""": [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], """weight_decay""": 0.0},
]
__magic_name__ = AdamW(A_, lr=args.learning_rate, eps=args.adam_epsilon )
__magic_name__ = get_linear_schedule_with_warmup(
A_, num_warmup_steps=args.warmup_steps, num_training_steps=A_ )
if args.do_train:
__magic_name__ , __magic_name__ , __magic_name__ = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ), desc="""Epoch""" ):
__magic_name__ = 0
__magic_name__ = 0
__magic_name__ = tqdm(A_, desc="""Training""" )
for step, batch in enumerate(A_ ):
__magic_name__ = tuple(t.to(A_ ) for t in batch )
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = batch
__magic_name__ = model(A_, mc_token_ids=A_, lm_labels=A_, mc_labels=A_ )
__magic_name__ = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
__magic_name__ = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
__magic_name__ = """Training loss: {:.2e} lr: {:.2e}""".format(A_, scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
__magic_name__ = model.module if hasattr(A_, """module""" ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
__magic_name__ = os.path.join(args.output_dir, A_ )
__magic_name__ = os.path.join(args.output_dir, A_ )
torch.save(model_to_save.state_dict(), A_ )
model_to_save.config.to_json_file(A_ )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
__magic_name__ = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
__magic_name__ = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(A_ )
if args.do_eval:
model.eval()
__magic_name__ , __magic_name__ = 0, 0
__magic_name__ , __magic_name__ = 0, 0
for batch in tqdm(A_, desc="""Evaluating""" ):
__magic_name__ = tuple(t.to(A_ ) for t in batch )
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = batch
with torch.no_grad():
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = model(
A_, mc_token_ids=A_, lm_labels=A_, mc_labels=A_ )
__magic_name__ = mc_logits.detach().cpu().numpy()
__magic_name__ = mc_labels.to("""cpu""" ).numpy()
__magic_name__ = accuracy(A_, A_ )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
__magic_name__ = eval_loss / nb_eval_steps
__magic_name__ = eval_accuracy / nb_eval_examples
__magic_name__ = tr_loss / nb_tr_steps if args.do_train else None
__magic_name__ = {"""eval_loss""": eval_loss, """eval_accuracy""": eval_accuracy, """train_loss""": train_loss}
__magic_name__ = os.path.join(args.output_dir, """eval_results.txt""" )
with open(A_, """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key in sorted(result.keys() ):
logger.info(""" %s = %s""", A_, str(result[key] ) )
writer.write("""%s = %s\n""" % (key, str(result[key] )) )
if __name__ == "__main__":
main()
| 88 | """simple docstring"""
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float] , allow_empty_subarrays: bool = False ) -> float:
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float('''-inf''' )
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num , curr_sum + num )
        max_sum = max(max_sum , curr_sum )
    return max_sum
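# Behaviour note with a worked example (illustrative): for an all-negative input the
# default returns the largest single element, max_subarray_sum([-3, -1, -2]) == -1,
# while allow_empty_subarrays=True lets the empty subarray win and the same call
# returns 0.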
if __name__ == "__main__":
from doctest import testmod
testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(f'''{max_subarray_sum(nums) = }''')
| 177 | 0 |
"""simple docstring"""
def merge_sort(collection: list ) -> list:
    '''simple docstring'''

    def merge(left: list , right: list ) -> list:
        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0 )
            yield from left
            yield from right

        return list(_merge() )

    if len(collection ) <= 1:
        return collection
    mid = len(collection ) // 2
    return merge(merge_sort(collection[:mid] ) , merge_sort(collection[mid:] ) )
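# Worked example (illustrative): merge_sort([5, 2, 9, 2]) returns [2, 2, 5, 9];
# because merge pops from the left list on ties, equal elements keep their relative
# order and the sort is stable.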
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
print(*merge_sort(unsorted), sep=''',''')
| 362 |
"""simple docstring"""
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ = 13 , snake_case__ = 64 , snake_case__ = 2 , snake_case__ = 3 , snake_case__ = 3 , snake_case__ = True , snake_case__ = True , snake_case__ = 128 , snake_case__=[16, 32, 64, 128] , snake_case__ = 7 , snake_case__ = 4 , snake_case__ = 37 , snake_case__ = "gelu" , snake_case__ = 0.1 , snake_case__ = 0.1 , snake_case__ = 10 , snake_case__ = 0.02 , snake_case__ = 2 , snake_case__ = 1 , snake_case__ = 128 , snake_case__ = [2, 2, 2, 2] , snake_case__ = 2 , snake_case__ = 2 , ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = parent
lowerCAmelCase : str = batch_size
lowerCAmelCase : int = image_size
lowerCAmelCase : int = patch_size
lowerCAmelCase : Union[str, Any] = num_channels
lowerCAmelCase : int = is_training
lowerCAmelCase : Tuple = use_labels
lowerCAmelCase : List[Any] = hidden_size
lowerCAmelCase : Dict = num_hidden_layers
lowerCAmelCase : Tuple = num_attention_heads
lowerCAmelCase : List[Any] = intermediate_size
lowerCAmelCase : int = hidden_act
lowerCAmelCase : str = hidden_dropout_prob
lowerCAmelCase : Tuple = attention_probs_dropout_prob
lowerCAmelCase : Optional[int] = type_sequence_label_size
lowerCAmelCase : List[str] = initializer_range
lowerCAmelCase : List[str] = encoder_stride
lowerCAmelCase : Union[str, Any] = num_attention_outputs
lowerCAmelCase : Any = embed_dim
lowerCAmelCase : Tuple = embed_dim + 1
lowerCAmelCase : str = resolution
lowerCAmelCase : Optional[Any] = depths
lowerCAmelCase : Any = hidden_sizes
lowerCAmelCase : List[str] = dim
lowerCAmelCase : str = mlp_expansion_ratio
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase : Tuple = None
if self.use_labels:
lowerCAmelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase : Tuple = self.get_config()
return config, pixel_values, labels
def lowercase__ ( self ):
"""simple docstring"""
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Tuple = TFEfficientFormerModel(config=snake_case__ )
lowerCAmelCase : Optional[int] = model(snake_case__ , training=snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Tuple = self.type_sequence_label_size
lowerCAmelCase : Dict = TFEfficientFormerForImageClassification(snake_case__ )
lowerCAmelCase : Tuple = model(snake_case__ , labels=snake_case__ , training=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCAmelCase : str = 1
lowerCAmelCase : Any = TFEfficientFormerForImageClassification(snake_case__ )
lowerCAmelCase : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase : str = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = self.prepare_config_and_inputs()
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : int = config_and_inputs
lowerCAmelCase : List[str] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE__ ( lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
a : Optional[int] =(
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
a : Union[str, Any] =(
{
"feature-extraction": TFEfficientFormerModel,
"image-classification": (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
a : int =False
a : Optional[Any] =False
a : List[Any] =False
a : str =False
a : List[Any] =False
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Any = TFEfficientFormerModelTester(self )
lowerCAmelCase : Dict = ConfigTester(
self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=37 )
def lowercase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="EfficientFormer does not use inputs_embeds" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="EfficientFormer does not support input and output embeddings" )
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : Tuple = model_class(snake_case__ )
lowerCAmelCase : str = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase : Optional[int] = [*signature.parameters.keys()]
lowerCAmelCase : str = ["pixel_values"]
self.assertListEqual(arg_names[:1] , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
def check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : List[Any] = model_class(snake_case__ )
lowerCAmelCase : List[str] = model(**self._prepare_for_class(snake_case__ , snake_case__ ) , training=snake_case__ )
lowerCAmelCase : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCAmelCase : List[Any] = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(snake_case__ ) , snake_case__ )
if hasattr(self.model_tester , "encoder_seq_length" ):
lowerCAmelCase : Union[str, Any] = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , "chunk_length" ) and self.model_tester.chunk_length > 1:
lowerCAmelCase : Tuple = seq_length * self.model_tester.chunk_length
else:
lowerCAmelCase : List[str] = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
lowerCAmelCase : Tuple = outputs.decoder_hidden_states
                self.assertIsInstance(snake_case__ , (list, tuple) )
self.assertEqual(len(snake_case__ ) , snake_case__ )
lowerCAmelCase : int = getattr(self.model_tester , "seq_length" , snake_case__ )
lowerCAmelCase : Tuple = getattr(self.model_tester , "decoder_seq_length" , snake_case__ )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
lowerCAmelCase , lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : str = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase : Dict = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__=False ):
"""simple docstring"""
lowerCAmelCase : Optional[int] = super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
@unittest.skip(reason="EfficientFormer does not implement masked image modeling yet" )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case__ )
@slow
def lowercase__ ( self ):
"""simple docstring"""
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase : Optional[Any] = TFEfficientFormerModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase : int = True
lowerCAmelCase : Union[str, Any] = getattr(self.model_tester , "seq_length" , snake_case__ )
lowerCAmelCase : Dict = getattr(self.model_tester , "encoder_seq_length" , snake_case__ )
lowerCAmelCase : Union[str, Any] = getattr(self.model_tester , "key_length" , snake_case__ )
lowerCAmelCase : List[str] = getattr(self.model_tester , "chunk_length" , snake_case__ )
if chunk_length is not None and hasattr(self.model_tester , "num_hashes" ):
lowerCAmelCase : Dict = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
lowerCAmelCase : int = True
lowerCAmelCase : int = False
lowerCAmelCase : Dict = True
lowerCAmelCase : List[Any] = model_class(snake_case__ )
lowerCAmelCase : Tuple = model(**self._prepare_for_class(snake_case__ , snake_case__ ) , training=snake_case__ )
lowerCAmelCase : str = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(snake_case__ ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCAmelCase : int = True
lowerCAmelCase : Dict = model_class(snake_case__ )
lowerCAmelCase : int = model(**self._prepare_for_class(snake_case__ , snake_case__ ) , training=snake_case__ )
lowerCAmelCase : int = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(snake_case__ ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
lowerCAmelCase : List[str] = model_class(snake_case__ )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
lowerCAmelCase : Optional[Any] = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=snake_case__ )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
lowerCAmelCase : Optional[Any] = model(snake_case__ )
self.assertTrue(outputs_dict is not None )
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowercase__ ( self ):
"""simple docstring"""
return (
EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300" )
if is_vision_available()
else None
)
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300" )
lowerCAmelCase : Tuple = self.default_image_processor
lowerCAmelCase : Tuple = prepare_img()
lowerCAmelCase : List[str] = image_processor(images=snake_case__ , return_tensors="tf" )
# forward pass
lowerCAmelCase : Optional[int] = model(**snake_case__ , training=snake_case__ )
# verify the logits
lowerCAmelCase : Union[str, Any] = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , snake_case__ )
lowerCAmelCase : Tuple = tf.constant([-0.0555, 0.4825, -0.0852] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , snake_case__ , atol=1e-4 ) )
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[Any] = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
"snap-research/efficientformer-l1-300" )
lowerCAmelCase : Optional[int] = self.default_image_processor
lowerCAmelCase : Optional[Any] = prepare_img()
lowerCAmelCase : Tuple = image_processor(images=snake_case__ , return_tensors="tf" )
# forward pass
lowerCAmelCase : Dict = model(**snake_case__ , training=snake_case__ )
# verify the logits
lowerCAmelCase : Optional[int] = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , snake_case__ )
lowerCAmelCase : Any = tf.constant([-0.1312, 0.4353, -1.0499] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , snake_case__ , atol=1e-4 ) )
| 133 | 0 |
'''simple docstring'''
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : Tuple = (PNDMScheduler,)
lowerCamelCase : Optional[int] = (('''num_inference_steps''', 50),)
def __UpperCAmelCase ( self : int , **UpperCAmelCase__ : List[str] ) -> Dict:
lowerCAmelCase = {
'num_train_timesteps': 1_0_0_0,
'beta_start': 0.0_001,
'beta_end': 0.02,
'beta_schedule': 'linear',
}
config.update(**UpperCAmelCase__ )
return config
def __UpperCAmelCase ( self : Optional[int] , UpperCAmelCase__ : Any=0 , **UpperCAmelCase__ : List[Any] ) -> Tuple:
lowerCAmelCase = dict(self.forward_default_kwargs )
lowerCAmelCase = kwargs.pop('num_inference_steps' , UpperCAmelCase__ )
lowerCAmelCase = self.dummy_sample
lowerCAmelCase = 0.1 * sample
lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowerCAmelCase = self.get_scheduler_config(**UpperCAmelCase__ )
lowerCAmelCase = scheduler_class(**UpperCAmelCase__ )
scheduler.set_timesteps(UpperCAmelCase__ )
# copy over dummy past residuals
lowerCAmelCase = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCAmelCase__ )
lowerCAmelCase = scheduler_class.from_pretrained(UpperCAmelCase__ )
new_scheduler.set_timesteps(UpperCAmelCase__ )
# copy over dummy past residuals
lowerCAmelCase = dummy_past_residuals[:]
lowerCAmelCase = scheduler.step_prk(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
lowerCAmelCase = new_scheduler.step_prk(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
lowerCAmelCase = scheduler.step_plms(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
lowerCAmelCase = new_scheduler.step_plms(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __UpperCAmelCase ( self : List[str] ) -> Tuple:
pass
def __UpperCAmelCase ( self : Tuple , UpperCAmelCase__ : Optional[int]=0 , **UpperCAmelCase__ : Tuple ) -> List[str]:
lowerCAmelCase = dict(self.forward_default_kwargs )
lowerCAmelCase = kwargs.pop('num_inference_steps' , UpperCAmelCase__ )
lowerCAmelCase = self.dummy_sample
lowerCAmelCase = 0.1 * sample
lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**UpperCAmelCase__ )
scheduler.set_timesteps(UpperCAmelCase__ )
# copy over dummy past residuals (must be after setting timesteps)
lowerCAmelCase = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCAmelCase__ )
lowerCAmelCase = scheduler_class.from_pretrained(UpperCAmelCase__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(UpperCAmelCase__ )
# copy over dummy past residual (must be after setting timesteps)
lowerCAmelCase = dummy_past_residuals[:]
lowerCAmelCase = scheduler.step_prk(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
lowerCAmelCase = new_scheduler.step_prk(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
lowerCAmelCase = scheduler.step_plms(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
lowerCAmelCase = new_scheduler.step_plms(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __UpperCAmelCase ( self : Dict , **UpperCAmelCase__ : Tuple ) -> str:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config(**UpperCAmelCase__ )
lowerCAmelCase = scheduler_class(**UpperCAmelCase__ )
lowerCAmelCase = 1_0
lowerCAmelCase = self.dummy_model()
lowerCAmelCase = self.dummy_sample_deter
scheduler.set_timesteps(UpperCAmelCase__ )
for i, t in enumerate(scheduler.prk_timesteps ):
lowerCAmelCase = model(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = scheduler.step_prk(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
lowerCAmelCase = model(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = scheduler.step_plms(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ).prev_sample
return sample
def __UpperCAmelCase ( self : Tuple ) -> List[Any]:
lowerCAmelCase = dict(self.forward_default_kwargs )
lowerCAmelCase = kwargs.pop('num_inference_steps' , UpperCAmelCase__ )
for scheduler_class in self.scheduler_classes:
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**UpperCAmelCase__ )
lowerCAmelCase = self.dummy_sample
lowerCAmelCase = 0.1 * sample
if num_inference_steps is not None and hasattr(UpperCAmelCase__ , 'set_timesteps' ):
scheduler.set_timesteps(UpperCAmelCase__ )
elif num_inference_steps is not None and not hasattr(UpperCAmelCase__ , 'set_timesteps' ):
lowerCAmelCase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
lowerCAmelCase = dummy_past_residuals[:]
lowerCAmelCase = scheduler.step_prk(UpperCAmelCase__ , 0 , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
lowerCAmelCase = scheduler.step_prk(UpperCAmelCase__ , 1 , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
lowerCAmelCase = scheduler.step_plms(UpperCAmelCase__ , 0 , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
lowerCAmelCase = scheduler.step_plms(UpperCAmelCase__ , 1 , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def __UpperCAmelCase ( self : int ) -> Tuple:
for timesteps in [1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase__ )
def __UpperCAmelCase ( self : Dict ) -> Optional[int]:
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=UpperCAmelCase__ )
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config(steps_offset=1 )
lowerCAmelCase = scheduler_class(**UpperCAmelCase__ )
scheduler.set_timesteps(1_0 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[9_0_1, 8_5_1, 8_5_1, 8_0_1, 8_0_1, 7_5_1, 7_5_1, 7_0_1, 7_0_1, 6_5_1, 6_5_1, 6_0_1, 6_0_1, 5_0_1, 4_0_1, 3_0_1, 2_0_1, 1_0_1, 1] ) , )
def __UpperCAmelCase ( self : Optional[int] ) -> Any:
for beta_start, beta_end in zip([0.0_001, 0.001] , [0.002, 0.02] ):
self.check_over_configs(beta_start=UpperCAmelCase__ , beta_end=UpperCAmelCase__ )
def __UpperCAmelCase ( self : Dict ) -> Tuple:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=UpperCAmelCase__ )
def __UpperCAmelCase ( self : List[Any] ) -> int:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCAmelCase__ )
def __UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
for t in [1, 5, 1_0]:
self.check_over_forward(time_step=UpperCAmelCase__ )
def __UpperCAmelCase ( self : str ) -> str:
for t, num_inference_steps in zip([1, 5, 1_0] , [1_0, 5_0, 1_0_0] ):
self.check_over_forward(num_inference_steps=UpperCAmelCase__ )
def __UpperCAmelCase ( self : Tuple ) -> str:
# earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
lowerCAmelCase = 2_7
for scheduler_class in self.scheduler_classes:
lowerCAmelCase = self.dummy_sample
lowerCAmelCase = 0.1 * sample
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**UpperCAmelCase__ )
scheduler.set_timesteps(UpperCAmelCase__ )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
lowerCAmelCase = scheduler.step_prk(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ).prev_sample
def __UpperCAmelCase ( self : Union[str, Any] ) -> int:
with self.assertRaises(UpperCAmelCase__ ):
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**UpperCAmelCase__ )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def __UpperCAmelCase ( self : Optional[Any] ) -> List[Any]:
lowerCAmelCase = self.full_loop()
lowerCAmelCase = torch.sum(torch.abs(UpperCAmelCase__ ) )
lowerCAmelCase = torch.mean(torch.abs(UpperCAmelCase__ ) )
assert abs(result_sum.item() - 198.1_318 ) < 1E-2
assert abs(result_mean.item() - 0.2_580 ) < 1E-3
def __UpperCAmelCase ( self : List[Any] ) -> List[Any]:
lowerCAmelCase = self.full_loop(prediction_type='v_prediction' )
lowerCAmelCase = torch.sum(torch.abs(UpperCAmelCase__ ) )
lowerCAmelCase = torch.mean(torch.abs(UpperCAmelCase__ ) )
assert abs(result_sum.item() - 67.3_986 ) < 1E-2
assert abs(result_mean.item() - 0.0_878 ) < 1E-3
def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
# We specify different beta, so that the first alpha is 0.99
lowerCAmelCase = self.full_loop(set_alpha_to_one=UpperCAmelCase__ , beta_start=0.01 )
lowerCAmelCase = torch.sum(torch.abs(UpperCAmelCase__ ) )
lowerCAmelCase = torch.mean(torch.abs(UpperCAmelCase__ ) )
assert abs(result_sum.item() - 230.0_399 ) < 1E-2
assert abs(result_mean.item() - 0.2_995 ) < 1E-3
def __UpperCAmelCase ( self : Any ) -> Optional[Any]:
# We specify different beta, so that the first alpha is 0.99
lowerCAmelCase = self.full_loop(set_alpha_to_one=UpperCAmelCase__ , beta_start=0.01 )
lowerCAmelCase = torch.sum(torch.abs(UpperCAmelCase__ ) )
lowerCAmelCase = torch.mean(torch.abs(UpperCAmelCase__ ) )
assert abs(result_sum.item() - 186.9_482 ) < 1E-2
assert abs(result_mean.item() - 0.2_434 ) < 1E-3
| 4 |
'''simple docstring'''
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
__snake_case ="""\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",
author = \"Lin, Chin-Yew and
Och, Franz Josef\",
booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",
month = \"aug 23{--}aug 27\",
year = \"2004\",
address = \"Geneva, Switzerland\",
publisher = \"COLING\",
url = \"https://www.aclweb.org/anthology/C04-1072\",
pages = \"501--507\",
}
"""
__snake_case ="""\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,
the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness
are not taken into account.
BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
"""
__snake_case ="""
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
'bleu': bleu score,
'precisions': geometric mean of n-gram precisions,
'brevity_penalty': brevity penalty,
'length_ratio': ratio of lengths,
'translation_length': translation_length,
'reference_length': reference_length
Examples:
>>> predictions = [
... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample
... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample
... ]
>>> references = [
... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)
... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric(\"bleu\")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results[\"bleu\"])
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Bleu(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'] , reference_urls=[
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] , )
    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth)
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
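# Hedged illustration (added for clarity, not part of the metric above): the
# headline quantities BLEU combines can be reproduced by hand for one sentence
# pair. `modified_unigram_precision` and `brevity_penalty` are illustrative
# helper names, not APIs of the `datasets` library.
import math
from collections import Counter

def modified_unigram_precision(prediction, reference):
    # clip each predicted token's count by its count in the reference
    pred_counts, ref_counts = Counter(prediction), Counter(reference)
    overlap = sum(min(count, ref_counts[token]) for token, count in pred_counts.items())
    return overlap / max(len(prediction), 1)

def brevity_penalty(pred_len, ref_len):
    # BLEU penalizes candidates shorter than the reference, never longer ones
    return 1.0 if pred_len > ref_len else math.exp(1 - ref_len / pred_len)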
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json""",
}
class GPTNeoXJapaneseConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "gpt_neox_japanese"

    def __init__(self, vocab_size=3_20_00, hidden_size=25_60, num_hidden_layers=32, num_attention_heads=32, intermediate_multiple_size=4, hidden_act="gelu", rotary_pct=1.00, rotary_emb_base=1_00_00, max_position_embeddings=20_48, initializer_range=0.02, layer_norm_eps=1e-5, use_cache=True, bos_token_id=3_19_96, eos_token_id=3_19_99, attention_dropout=0.1, hidden_dropout=0.0, **kwargs, ):
        """simple docstring"""
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        """simple docstring"""
        config = {
            "num_train_timesteps": 11_00,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        """simple docstring"""
        for timesteps in [10, 50, 1_00, 10_00]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        """simple docstring"""
        for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        """simple docstring"""
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        """simple docstring"""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_with_v_prediction(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3

    def test_full_loop_device(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 124.52_2994_9951_1719) < 1e-2
        assert abs(result_mean.item() - 0.1_6213_9326_3339_9963) < 1e-3
def lucas_lehmer_test(p: int) -> bool:
    '''simple docstring'''
    if p < 2:
        raise ValueError('p should not be less than 2!')
    elif p == 2:
        return True
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
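    # Usage sketch (assumes the function above is in scope): the test applies to
    # odd prime exponents p; collecting the ones that pass reproduces the small
    # Mersenne primes. `mersenne_exponents` is an illustrative name.
    mersenne_exponents = [p for p in (3, 5, 7, 11, 13) if lucas_lehmer_test(p)]
    print(mersenne_exponents)  # [3, 5, 7, 13] -- 2**11 - 1 = 2047 = 23 * 89 is composite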
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs("""hub/hopper-medium-v2/unet/hor32""", exist_ok=True)
os.makedirs("""hub/hopper-medium-v2/unet/hor128""", exist_ok=True)
os.makedirs("""hub/hopper-medium-v2/value_function""", exist_ok=True)
def unet(hor: int):
    '''simple docstring'''
    if hor == 1_28:
        down_block_types = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
        block_out_channels = (32, 1_28, 2_56)
        up_block_types = ('UpResnetBlock1D', 'UpResnetBlock1D')
    elif hor == 32:
        down_block_types = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
        block_out_channels = (32, 64, 1_28, 2_56)
        up_block_types = ('UpResnetBlock1D', 'UpResnetBlock1D', 'UpResnetBlock1D')
    model = torch.load(f'''/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch''')
    state_dict = model.state_dict()
    config = {
'down_block_types': down_block_types,
'block_out_channels': block_out_channels,
'up_block_types': up_block_types,
'layers_per_block': 1,
'use_timestep_embedding': True,
'out_block_type': 'OutConv1DBlock',
'norm_num_groups': 8,
'downsample_each_block': False,
'in_channels': 14,
'out_channels': 14,
'extra_in_channels': 0,
'time_embedding_type': 'positional',
'flip_sin_to_cos': False,
'freq_shift': 1,
'sample_size': 6_55_36,
'mid_block_type': 'MidResTemporalBlock1D',
'act_fn': 'mish',
}
    hf_value_function = UNet1DModel(**config)
print(f'''length of state dict: {len(state_dict.keys() )}''' )
print(f'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)
torch.save(hf_value_function.state_dict() ,f'''hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin''' )
with open(f'''hub/hopper-medium-v2/unet/hor{hor}/config.json''' ,'w' ) as f:
        json.dump(config, f)
def value_function():
    '''simple docstring'''
    config = {
'in_channels': 14,
'down_block_types': ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D'),
'up_block_types': (),
'out_block_type': 'ValueFunction',
'mid_block_type': 'ValueFunctionMidBlock1D',
'block_out_channels': (32, 64, 1_28, 2_56),
'layers_per_block': 1,
'downsample_each_block': True,
'sample_size': 6_55_36,
'out_channels': 14,
'extra_in_channels': 0,
'time_embedding_type': 'positional',
'use_timestep_embedding': True,
'flip_sin_to_cos': False,
'freq_shift': 1,
'norm_num_groups': 8,
'act_fn': 'mish',
}
    model = torch.load('/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch')
    state_dict = model
    hf_value_function = UNet1DModel(**config)
print(f'''length of state dict: {len(state_dict.keys() )}''' )
print(f'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)
torch.save(hf_value_function.state_dict() ,'hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin' )
with open('hub/hopper-medium-v2/value_function/config.json' ,'w' ) as f:
        json.dump(config, f)
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
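    # The two conversion functions above rely on both state dicts enumerating
    # their parameters in the same order, so zipping the key sequences yields a
    # rename map. A toy sketch of that idea (dictionary contents are hypothetical):
    old_sd = {'blocks.0.w': 1, 'blocks.0.b': 2}
    new_template = {'down.0.weight': None, 'down.0.bias': None}
    demo_mapping = dict(zip(old_sd.keys(), new_template.keys()))
    for k, v in demo_mapping.items():
        old_sd[v] = old_sd.pop(k)
    assert old_sd == {'down.0.weight': 1, 'down.0.bias': 2}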
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""tokenizer_file""": {
"""bigscience/tokenizer""": """https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json""",
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json""",
},
}
class BloomTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", add_prefix_space=False, clean_up_tokenization_spaces=False, **kwargs, ) -> None:
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, add_prefix_space=add_prefix_space, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs, )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
                ''' pretokenized inputs.''')
        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
                ''' pretokenized inputs.''')
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: str = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length:]
        return input_ids
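# Minimal usage sketch (commented out because it needs network access to the
# Hugging Face Hub; the checkpoint name is one of the entries mapped above):
# tokenizer = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
# ids = tokenizer("Hello world").input_ids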
'''simple docstring'''
class Graph:
    '''simple docstring'''

    def __init__(self) -> None:
        self.vertex = {}

    def print_graph(self) -> None:
        print(self.vertex)
        for i in self.vertex:
            print(i, ' -> ', ' -> '.join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        # check if vertex is already present,
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list) -> None:
        # mark start vertex as visited
        visited[start_vertex] = True
        print(start_vertex, end=' ')
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)
if __name__ == "__main__":
__a: Optional[Any] = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("""DFS:""")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
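# For very deep graphs the recursive DFS above can exceed Python's recursion
# limit; an equivalent iterative sketch with an explicit stack (illustrative,
# not part of the original class):
def dfs_iterative(graph: dict, start) -> list:
    visited, stack, order = set(), [start], []
    while stack:
        node = stack.pop()
        if node not in visited:
            visited.add(node)
            order.append(node)
            # push neighbours reversed so traversal matches the recursive order
            stack.extend(reversed(graph.get(node, [])))
    return order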
'''simple docstring'''
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
print(f'{len(upper_files)} files contain uppercase characters:')
print('''\n'''.join(upper_files) + '''\n''')
space_files = [file for file in filepaths if " " in file]
if space_files:
print(f'{len(space_files)} files contain space characters:')
print('''\n'''.join(space_files) + '''\n''')
hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
print(f'{len(hyphen_files)} files contain hyphen characters:')
print('''\n'''.join(hyphen_files) + '''\n''')
nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(f'{len(nodir_files)} files are not in a directory:')
print('''\n'''.join(nodir_files) + '''\n''')
bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json''',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "convbert"

    def __init__(self, vocab_size=3_05_22, hidden_size=7_68, num_hidden_layers=12, num_attention_heads=12, intermediate_size=30_72, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=5_12, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1E-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, embedding_size=7_68, head_ratio=2, conv_kernel_size=9, num_groups=1, classifier_dropout=None, **kwargs, ):
        '''simple docstring'''
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs, )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout
class ConvBertOnnxConfig(OnnxConfig):
    '''simple docstring'''

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
                ('token_type_ids', dynamic_axis),
            ])
'''simple docstring'''
def hex_to_bin(hex_num: str) -> int:
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function")
    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]
    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("Invalid value was passed to the function")
    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1
    return int(("-" + bin_str) if is_negative else bin_str)
if __name__ == "__main__":
import doctest
doctest.testmod()
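    # Spot checks consistent with the implementation above (the result is the
    # binary digit string re-read as a base-10 integer, matching the return type):
    assert hex_to_bin("AC") == 10101100
    assert hex_to_bin("-fFfF") == -1111111111111111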
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]
def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]
    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]
        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio
        row += 1
        col += 1
    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio
    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
def interpolate(y_list: list[int]) -> Callable[[int], int]:
    size: int = len(y_list)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int
    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val
    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size))

    return interpolated_func
def question_function(variable: int) -> int:
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret
if __name__ == "__main__":
print(F'''{solution() = }''')
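    # Sanity sketch for `interpolate` (assumes the definitions above): fitting a
    # degree-2 polynomial through u(1), u(2), u(3) of u(n) = n**3 reproduces those
    # terms, and its value at n = 4 is 58, the classic first incorrect term (FIT).
    quadratic = interpolate([1, 8, 27])
    assert [quadratic(n) for n in (1, 2, 3)] == [1, 8, 27]
    assert quadratic(4) == 58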
'''simple docstring'''
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


def download_command_factory(args):
    return EnvironmentCommand(args.accelerate_config_file)
class EnvironmentCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        '''simple docstring'''
        download_parser = parser.add_parser('env')
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            '--accelerate-config_file', default=None, help='The accelerate config file to use for the default values in the launching script.', )
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, accelerate_config_file, *args):
        '''simple docstring'''
        self._accelerate_config_file = accelerate_config_file
    def run(self):
        '''simple docstring'''
        safetensors_version = 'not installed'
        if is_safetensors_available():
            import safetensors

            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec('safetensors') is not None:
            import safetensors

            safetensors_version = f"{safetensors.__version__} but is ignored because of PyTorch version too old."
        accelerate_version = 'not installed'
        accelerate_config = accelerate_config_str = 'not found'
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
                accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()
            accelerate_config_str = (
                '\n'.join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
                if isinstance(accelerate_config, dict)
                else f"\t{accelerate_config}"
            )
        pt_version = 'not installed'
        pt_cuda_available = 'NA'
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()
        tf_version = 'not installed'
        tf_cuda_available = 'NA'
        if is_tf_available():
            import tensorflow as tf

            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices('GPU'))
        flax_version = 'not installed'
        jax_version = 'not installed'
        jaxlib_version = 'not installed'
        jax_backend = 'NA'
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform
        info = {
            '`transformers` version': version,
            'Platform': platform.platform(),
            'Python version': platform.python_version(),
            'Huggingface_hub version': huggingface_hub.__version__,
            'Safetensors version': f"{safetensors_version}",
            'Accelerate version': f"{accelerate_version}",
            'Accelerate config': f"{accelerate_config_str}",
            'PyTorch version (GPU?)': f"{pt_version} ({pt_cuda_available})",
            'Tensorflow version (GPU?)': f"{tf_version} ({tf_cuda_available})",
            'Flax version (CPU?/GPU?/TPU?)': f"{flax_version} ({jax_backend})",
            'Jax version': f"{jax_version}",
            'JaxLib version': f"{jaxlib_version}",
            'Using GPU in script?': '<fill in>',
            'Using distributed or parallel set-up in script?': '<fill in>',
        }
        print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n')
        print(self.format_dict(info))
        return info
    @staticmethod
    def format_dict(d):
        '''simple docstring'''
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
'''simple docstring'''
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
parser = argparse.ArgumentParser()
parser.add_argument('''--user''', type=str, default='''ubuntu''')
parser.add_argument('''--host''', type=str, default='''localhost''')
parser.add_argument('''--key_path''', type=str, default=None)
parser.add_argument('''--instance''', type=str, default='''V100:1''')
parser.add_argument('''--provider''', type=str, default='''cheapest''')
parser.add_argument('''--use_spot''', type=bool, default=False)
parser.add_argument('''--example''', type=str, default='''pytorch/text-generation/run_generation.py''')
args, unknown = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError('''Cannot specify both BYO and on-demand cluster args''')
    cluster = rh.cluster(
name='''rh-cluster''', ips=[args.host], ssh_creds={'''ssh_user''': args.user, '''ssh_private_key''': args.key_path}
)
else:
    cluster = rh.cluster(
name='''rh-cluster''', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
example_dir = args.example.rsplit("/", 1)[0]
# Set up remote environment
cluster.install_packages(['''pip:./''']) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([F"""pip install -r transformers/examples/{example_dir}/requirements.txt"""])
cluster.run(['''pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117'''])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([F"""python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"""])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_vision_encoder_decoder""": ["""VisionEncoderDecoderConfig""", """VisionEncoderDecoderOnnxConfig"""]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
import torch
from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)
    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f'''Save PyTorch model to {pytorch_weights_dump_path}''')
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f'''Save configuration file to {pytorch_config_dump_path}''')
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--gpt2_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--gpt2_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained OpenAI model. \n"""
"""This specifies the model architecture."""
),
)
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
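    # Example invocation (script file name and paths are placeholders, not from
    # the original file):
    #   python convert_gpt2_checkpoint.py --gpt2_checkpoint_path /path/to/tf_ckpt \
    #       --pytorch_dump_folder_path /path/to/out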
def exchange_sort(numbers: list[int]) -> list[int]:
    """simple docstring"""
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(exchange_sort(unsorted))
demo_graph = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node)
    # in case there's no path between the 2 nodes
    return []
def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, act_dim=6, state_dim=17, hidden_size=23, max_length=11, is_training=True, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1_000)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))
        config = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
    def get_config(self):
'''simple docstring'''
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
    def create_and_check_model(self, config, states, actions, rewards, returns_to_go, timesteps, attention_mask, ):
        '''simple docstring'''
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)
        self.parent.assertEqual(result.state_preds.shape, states.shape)
        self.parent.assertEqual(result.action_preds.shape, actions.shape)
        self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size))  # seq length *3 as there are 3 modalities: states, returns and actions
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            'states': states,
            'actions': actions,
            'rewards': rewards,
            'returns_to_go': returns_to_go,
            'timesteps': timesteps,
            'attention_mask': attention_mask,
        }
        return config, inputs_dict
@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {'feature-extraction': DecisionTransformerModel} if is_torch_available() else {}

    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
    test_generate_without_input_ids = False

    # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False
    def setUp(self):
        '''simple docstring'''
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)
    def test_config(self):
'''simple docstring'''
self.config_tester.run_common_tests()
    def test_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_forward_signature(self):
        '''simple docstring'''
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = [
                'states',
                'actions',
                'rewards',
                'returns_to_go',
                'timesteps',
                'attention_mask',
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_autoregressive_prediction(self):
        '''simple docstring'''
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained('edbeeching/decision-transformer-gym-hopper-expert')
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32)  # env.reset()
        expected_outputs = torch.tensor(
            [[0.24_27_93, -0.28_69_30_74, 0.8_74_26_13], [0.67_81_52_74, -0.08_10_10_85, -0.12_95_21_47]], device=torch_device)
        returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)
        states = state
        actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)
        rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)
        timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)
        for step in range(NUM_STEPS):
            actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)
            rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)
            attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)
            with torch.no_grad():
                state_pred, action_pred, return_pred = model(
                    states=states, actions=actions, rewards=rewards, returns_to_go=returns_to_go, timesteps=timesteps, attention_mask=attention_mask, return_dict=False, )
            self.assertEqual(action_pred.shape, actions.shape)
            self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1e-4))
            state, reward, done, _ = (  # env.step(action)
                torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32),
                1.0,
                False,
                {},
            )
            action = action_pred[0, -1]
            states = torch.cat([states, state], dim=1)
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1)
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, projection_dim=0, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = BertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_dpr_context_encoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))
    def create_and_check_dpr_question_encoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))
    def create_and_check_dpr_reader(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids}
        return config, inputs_dict
@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {'feature-extraction': TFDPRQuestionEncoder} if is_tf_available() else {}

    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        '''simple docstring'''
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)
    def test_config(self):
'''simple docstring'''
self.config_tester.run_common_tests()
    def test_dpr_context_encoder_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)
@slow
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE = TFDPRContextEncoder.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE = TFDPRContextEncoder.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE = TFDPRQuestionEncoder.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE = TFDPRReader.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
@require_tf
class __UpperCAmelCase (unittest.TestCase ):
@slow
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TFDPRQuestionEncoder.from_pretrained("""facebook/dpr-question_encoder-single-nq-base""" )
_SCREAMING_SNAKE_CASE = tf.constant(
[[101, 7_592, 1_010, 2_003, 2_026, 3_899, 10_140, 1_029, 102]] ) # [CLS] hello, is my dog cute? [SEP]
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
_SCREAMING_SNAKE_CASE = tf.constant(
[
[
0.03_23_62_53,
0.12_75_33_35,
0.16_81_85_09,
0.00_27_97_86,
0.3_89_69_33,
0.24_26_49_45,
0.2_17_89_71,
-0.02_33_52_27,
-0.08_48_19_59,
-0.14_32_41_17,
]
] )
self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 125 | 0 |
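A minimal sanity sketch of the shape contract the tester above checks. The tiny hyperparameters and `projection_dim=0` are illustrative assumptions, not values taken from the test file.

import tensorflow as tf

from transformers import DPRConfig, TFDPRQuestionEncoder

# projection_dim == 0 means no projection layer, so pooler_output falls back to
# hidden_size, matching `self.projection_dim or self.hidden_size` in the tester.
cfg = DPRConfig(vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, projection_dim=0)
encoder = TFDPRQuestionEncoder(cfg)
output = encoder(tf.constant([[1, 2, 3]]))
assert output.pooler_output.shape == (1, 32)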
import warnings

from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor


logger = logging.get_logger(__name__)


class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 299 |
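A usage sketch for the deprecation shim above, assuming a transformers install where OwlViTImageProcessor can be constructed with defaults: instantiating the old class should emit a FutureWarning but otherwise behave like the new one.

import warnings

from transformers import OwlViTFeatureExtractor

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    extractor = OwlViTFeatureExtractor()  # forwards straight to OwlViTImageProcessor.__init__
assert any(issubclass(w.category, FutureWarning) for w in caught)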
from cv2 import destroyAllWindows, imread, imshow, waitKey


def convert_to_negative(img):
    # getting number of pixels in the image
    pixel_h, pixel_v = img.shape[0], img.shape[1]

    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]

    return img


if __name__ == "__main__":
    # read original image
    img = imread("image_data/lena.jpg", 1)

    # convert to its negative
    neg = convert_to_negative(img)

    # show result image
    imshow("negative of original image", img)

    waitKey(0)
    destroyAllWindows()
| 299 | 1 |
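A headless self-check for convert_to_negative above, assuming the function is in scope; it uses a synthetic one-pixel NumPy array so no cv2 window or image file is needed.

import numpy as np

sample = np.array([[[0, 10, 255]]], dtype=np.uint8)  # a single BGR pixel
negated = convert_to_negative(sample.copy())
assert (negated == np.array([[[255, 245, 0]]], dtype=np.uint8)).all()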
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
lowerCAmelCase__ : Optional[int] =logging.get_logger(__name__)
lowerCAmelCase__ : Optional[int] ={
'openai/imagegpt-small': '',
'openai/imagegpt-medium': '',
'openai/imagegpt-large': '',
}
class UpperCAmelCase_ ( _lowerCamelCase ):
'''simple docstring'''
UpperCamelCase__ : int = '''imagegpt'''
UpperCamelCase__ : Union[str, Any] = ['''past_key_values''']
UpperCamelCase__ : str = {
'''hidden_size''': '''n_embd''',
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self , _A=512 + 1 , _A=32 * 32 , _A=512 , _A=24 , _A=8 , _A=None , _A="quick_gelu" , _A=0.1 , _A=0.1 , _A=0.1 , _A=1e-5 , _A=0.0_2 , _A=True , _A=True , _A=False , _A=False , _A=False , **_A , ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = n_positions
__SCREAMING_SNAKE_CASE = n_embd
__SCREAMING_SNAKE_CASE = n_layer
__SCREAMING_SNAKE_CASE = n_head
__SCREAMING_SNAKE_CASE = n_inner
__SCREAMING_SNAKE_CASE = activation_function
__SCREAMING_SNAKE_CASE = resid_pdrop
__SCREAMING_SNAKE_CASE = embd_pdrop
__SCREAMING_SNAKE_CASE = attn_pdrop
__SCREAMING_SNAKE_CASE = layer_norm_epsilon
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = scale_attn_weights
__SCREAMING_SNAKE_CASE = use_cache
__SCREAMING_SNAKE_CASE = scale_attn_by_inverse_layer_idx
__SCREAMING_SNAKE_CASE = reorder_and_upcast_attn
__SCREAMING_SNAKE_CASE = tie_word_embeddings
super().__init__(tie_word_embeddings=_A , **_A )
class UpperCAmelCase_ ( _lowerCamelCase ):
'''simple docstring'''
@property
def _A ( self ):
'''simple docstring'''
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
] )
def _A ( self , _A , _A = 1 , _A = -1 , _A = False , _A = None , _A = 3 , _A = 32 , _A = 32 , ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self._generate_dummy_images(_A , _A , _A , _A )
__SCREAMING_SNAKE_CASE = dict(preprocessor(images=_A , return_tensors=_A ) )
return inputs
| 353 |
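A short sketch of the `attribute_map` aliasing above, assuming the standard PretrainedConfig behavior where reads of a mapped attribute are redirected to the canonical name.

from transformers import ImageGPTConfig

config = ImageGPTConfig(n_embd=256, n_layer=12)
assert config.hidden_size == 256  # alias resolves to n_embd
assert config.num_hidden_layers == 12  # alias resolves to n_layer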
def solution(n: int = 1000) -> int:
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product


if __name__ == "__main__":
    print(f"{solution() = }")
| 118 | 0 |
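A brute-force cross-check for solution(): the closed form for `b` relies on integer division, so it is worth confirming against an exhaustive search on a small n. The helper name is illustrative.

def solution_bruteforce(n: int = 1000) -> int:
    # Try every (a, b) pair with a <= b; c is determined by the sum constraint.
    best = -1
    for a in range(1, n):
        for b in range(a, n - a):
            c = n - a - b
            if a * a + b * b == c * c:
                best = max(best, a * b * c)
    return best


assert solution(120) == solution_bruteforce(120)  # both find the (30, 40, 50) triplet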
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCAmelCase__ = {
'''vocab_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'''
),
},
'''tokenizer_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''',
'''roberta-base-openai-detector''': (
'''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'''
),
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'''
),
},
}
lowerCAmelCase__ = {
'''roberta-base''': 512,
'''roberta-large''': 512,
'''roberta-large-mnli''': 512,
'''distilroberta-base''': 512,
'''roberta-base-openai-detector''': 512,
'''roberta-large-openai-detector''': 512,
}
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : int =VOCAB_FILES_NAMES
a : Tuple =PRETRAINED_VOCAB_FILES_MAP
a : List[str] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : Optional[int] =["input_ids", "attention_mask"]
a : int =RobertaTokenizer
def __init__( self , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__="replace" , snake_case__="<s>" , snake_case__="</s>" , snake_case__="</s>" , snake_case__="<s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__="<mask>" , snake_case__=False , snake_case__=True , **snake_case__ , ):
"""simple docstring"""
super().__init__(
snake_case__ , snake_case__ , tokenizer_file=snake_case__ , errors=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ , **snake_case__ , )
lowerCAmelCase : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , snake_case__ ) != add_prefix_space:
lowerCAmelCase : Optional[int] = getattr(snake_case__ , pre_tok_state.pop("type" ) )
lowerCAmelCase : Union[str, Any] = add_prefix_space
lowerCAmelCase : Union[str, Any] = pre_tok_class(**snake_case__ )
lowerCAmelCase : int = add_prefix_space
lowerCAmelCase : Union[str, Any] = "post_processor"
lowerCAmelCase : List[str] = getattr(self.backend_tokenizer , snake_case__ , snake_case__ )
if tokenizer_component_instance:
lowerCAmelCase : Union[str, Any] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
lowerCAmelCase : Tuple = tuple(state["sep"] )
if "cls" in state:
lowerCAmelCase : List[Any] = tuple(state["cls"] )
lowerCAmelCase : int = False
if state.get("add_prefix_space" , snake_case__ ) != add_prefix_space:
lowerCAmelCase : Union[str, Any] = add_prefix_space
lowerCAmelCase : List[Any] = True
if state.get("trim_offsets" , snake_case__ ) != trim_offsets:
lowerCAmelCase : List[str] = trim_offsets
lowerCAmelCase : Optional[int] = True
if changes_to_apply:
lowerCAmelCase : Union[str, Any] = getattr(snake_case__ , state.pop("type" ) )
lowerCAmelCase : Optional[Any] = component_class(**snake_case__ )
setattr(self.backend_tokenizer , snake_case__ , snake_case__ )
@property
def lowercase__ ( self ):
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : List[Any] = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else value
lowerCAmelCase : Optional[Any] = value
def lowercase__ ( self , *snake_case__ , **snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Any = kwargs.get("is_split_into_words" , snake_case__ )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*snake_case__ , **snake_case__ )
def lowercase__ ( self , *snake_case__ , **snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = kwargs.get("is_split_into_words" , snake_case__ )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*snake_case__ , **snake_case__ )
def lowercase__ ( self , snake_case__ , snake_case__ = None ):
"""simple docstring"""
lowerCAmelCase : Tuple = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
def lowercase__ ( self , snake_case__ , snake_case__=None ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowercase__ ( self , snake_case__ , snake_case__ = None ):
"""simple docstring"""
lowerCAmelCase : List[Any] = [self.sep_token_id]
lowerCAmelCase : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 108 |
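A usage sketch for the add_prefix_space guard in _encode_plus/_batch_encode_plus above, assuming the public `roberta-base` checkpoint is reachable; the printed ids are illustrative only.

from transformers import RobertaTokenizerFast

tok = RobertaTokenizerFast.from_pretrained("roberta-base", add_prefix_space=True)
# Pretokenized input is only legal because add_prefix_space=True was passed at init.
enc = tok(["Hello", "world"], is_split_into_words=True)
print(enc.input_ids)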
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
"tokenization_convbert": ["ConvBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
"CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvBertForMaskedLM",
"ConvBertForMultipleChoice",
"ConvBertForQuestionAnswering",
"ConvBertForSequenceClassification",
"ConvBertForTokenClassification",
"ConvBertLayer",
"ConvBertModel",
"ConvBertPreTrainedModel",
"load_tf_weights_in_convbert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
"TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFConvBertForMaskedLM",
"TFConvBertForMultipleChoice",
"TFConvBertForQuestionAnswering",
"TFConvBertForSequenceClassification",
"TFConvBertForTokenClassification",
"TFConvBertLayer",
"TFConvBertModel",
"TFConvBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 204 | 0 |
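The `_LazyModule` indirection above defers the heavy framework imports until an attribute is first accessed. A minimal standalone sketch of the same idea using module-level __getattr__ (PEP 562), meant to live in a package `__init__.py`; the names here are illustrative, not the transformers API.

import importlib

_LAZY_ATTRS = {"ConvBertModel": ".modeling_convbert"}  # attribute -> submodule


def __getattr__(name):
    # Called only when `name` is not found normally, so the submodule is
    # imported on first access and never before.
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")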
"""simple docstring"""
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class lowerCamelCase_:
'''simple docstring'''
def snake_case__ ( self , lowerCamelCase__ ):
raise NotImplementedError()
def snake_case__ ( self ):
raise NotImplementedError()
class lowerCamelCase_( A__ ):
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__ = False , **lowerCamelCase__ ):
_lowerCamelCase = tokenizer
_lowerCamelCase = skip_prompt
_lowerCamelCase = decode_kwargs
# variables used in the streaming process
_lowerCamelCase = []
_lowerCamelCase = 0
_lowerCamelCase = True
def snake_case__ ( self , lowerCamelCase__ ):
if len(value.shape ) > 1 and value.shape[0] > 1:
raise ValueError('''TextStreamer only supports batch size 1''' )
elif len(value.shape ) > 1:
_lowerCamelCase = value[0]
if self.skip_prompt and self.next_tokens_are_prompt:
_lowerCamelCase = False
return
# Add the new token to the cache and decodes the entire thing.
self.token_cache.extend(value.tolist() )
_lowerCamelCase = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
# After the symbol for a new line, we flush the cache.
if text.endswith('''\n''' ):
_lowerCamelCase = text[self.print_len :]
_lowerCamelCase = []
_lowerCamelCase = 0
# If the last token is a CJK character, we print the characters.
elif len(lowerCamelCase__ ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
_lowerCamelCase = text[self.print_len :]
self.print_len += len(lowerCamelCase__ )
# Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
# which may change with the subsequent token -- there are probably smarter ways to do this!)
else:
_lowerCamelCase = text[self.print_len : text.rfind(''' ''' ) + 1]
self.print_len += len(lowerCamelCase__ )
self.on_finalized_text(lowerCamelCase__ )
def snake_case__ ( self ):
# Flush the cache, if it exists
if len(self.token_cache ) > 0:
_lowerCamelCase = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
_lowerCamelCase = text[self.print_len :]
_lowerCamelCase = []
_lowerCamelCase = 0
else:
_lowerCamelCase = ''''''
_lowerCamelCase = True
self.on_finalized_text(lowerCamelCase__ , stream_end=lowerCamelCase__ )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = False ):
print(lowerCamelCase__ , flush=lowerCamelCase__ , end='''''' if not stream_end else None )
def snake_case__ ( self , lowerCamelCase__ ):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0x4E00 and cp <= 0x9FFF)
or (cp >= 0x3400 and cp <= 0x4DBF) #
or (cp >= 0x20000 and cp <= 0x2A6DF) #
or (cp >= 0x2A700 and cp <= 0x2B73F) #
or (cp >= 0x2B740 and cp <= 0x2B81F) #
or (cp >= 0x2B820 and cp <= 0x2CEAF) #
or (cp >= 0xF900 and cp <= 0xFAFF)
or (cp >= 0x2F800 and cp <= 0x2FA1F) #
): #
return True
return False
class lowerCamelCase_( A__ ):
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__ = False , lowerCamelCase__ = None , **lowerCamelCase__ ):
super().__init__(lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ )
_lowerCamelCase = Queue()
_lowerCamelCase = None
_lowerCamelCase = timeout
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = False ):
self.text_queue.put(lowerCamelCase__ , timeout=self.timeout )
if stream_end:
self.text_queue.put(self.stop_signal , timeout=self.timeout )
def __iter__( self ):
return self
def snake_case__ ( self ):
_lowerCamelCase = self.text_queue.get(timeout=self.timeout )
if value == self.stop_signal:
raise StopIteration()
else:
return value
| 73 |
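A typical consumption pattern for TextIteratorStreamer above, assuming any causal LM checkpoint (the gpt2 name is just an example): generate() runs in a worker thread while the main thread iterates over decoded text chunks until the stop signal.

from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer

tok = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
inputs = tok(["A short story:"], return_tensors="pt")

streamer = TextIteratorStreamer(tok, skip_prompt=True)
thread = Thread(target=model.generate, kwargs=dict(**inputs, streamer=streamer, max_new_tokens=20))
thread.start()
for chunk in streamer:  # blocks on the queue; ends when the stop signal arrives
    print(chunk, end="")
thread.join()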
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 73 | 1 |
import gc
import unittest

from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
    is_pipeline_test,
    is_torch_available,
    nested_simplify,
    require_tf,
    require_torch,
    require_torch_gpu,
    slow,
)

from .test_pipelines_common import ANY


@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        if is_torch_available():
            import torch

            torch.cuda.empty_cache()

    @require_tf
    def test_small_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="tf")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
                {"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is grouped",
                    "score": 2.1e-05,
                    "token": 38015,
                    "token_str": " grouped",
                },
                {
                    "sequence": "The largest city in France is accuser",
                    "score": 2.1e-05,
                    "token": 25506,
                    "token_str": " accuser",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="pt")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
                {"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is Maul",
                    "score": 2.2e-05,
                    "token": 35676,
                    "token_str": " Maul",
                },
                {"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
            ],
        )

        outputs = unmasker("My name is <mask> <mask>", top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is Maul<mask></s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
                ],
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is<mask> Maul</s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
                ],
            ],
        )

    @require_torch_gpu
    def test_fp16_casting(self):
        pipe = pipeline("fill-mask", model="hf-internal-testing/tiny-random-distilbert", device=0, framework="pt")

        # convert model to fp16
        pipe.model.half()

        response = pipe("Paris is the [MASK] of France.")
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got casted back to float32
        # for postprocessing.
        self.assertIsInstance(response, list)

    @slow
    @require_torch
    def test_large_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="pt")
        self.run_large_test(unmasker)

    @slow
    @require_tf
    def test_large_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="tf")
        self.run_large_test(unmasker)

    def run_large_test(self, unmasker):
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
                {"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {
                    "sequence": "The largest city in France is Paris",
                    "score": 0.251,
                    "token": 2201,
                    "token_str": " Paris",
                },
                {
                    "sequence": "The largest city in France is Lyon",
                    "score": 0.214,
                    "token": 12790,
                    "token_str": " Lyon",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
            ],
        )

    @require_torch
    def test_model_no_pad_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])

    @require_tf
    def test_model_no_pad_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="tf")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])

    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        examples = [
            f"This is another {tokenizer.mask_token} test",
        ]
        return fill_masker, examples

    def run_pipeline_test(self, fill_masker, examples):
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token}",
        )
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."])
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )

        with self.assertRaises(ValueError):
            fill_masker([None])
        # No mask_token is not supported
        with self.assertRaises(PipelineException):
            fill_masker("This is")

        self.run_test_top_k(model, tokenizer)
        self.run_test_targets(model, tokenizer)
        self.run_test_top_k_targets(model, tokenizer)
        self.fill_mask_with_duplicate_targets_and_top_k(model, tokenizer)
        self.fill_mask_with_multiple_masks(model, tokenizer)

    def run_test_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        targets = sorted(vocab.keys())[:2]
        # Pipeline argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Call argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Score equivalence
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        tokens = [top_mask["token_str"] for top_mask in outputs]
        scores = [top_mask["score"] for top_mask in outputs]

        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets) == set(tokens):
            unmasked_targets = fill_masker(f"This is a {tokenizer.mask_token}", targets=tokens)
            target_scores = [top_mask["score"] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(scores), nested_simplify(target_scores))

        # Raises with invalid
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[])
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(ValueError):
                outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[""])
            with self.assertRaises(ValueError):
                outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets="")

    def run_test_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2)
        self.assertEqual(
            outputs2,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))

    def run_test_top_k_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        # top_k=2, ntargets=3
        targets = sorted(vocab.keys())[:3]
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2, targets=targets)

        # If we use the most probably targets, and filter differently, we should still
        # have the same results
        targets2 = [el["token_str"] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets2).issubset(targets):
            outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=3, targets=targets2)
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))

    def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        vocab = tokenizer.get_vocab()
        # String duplicates + id duplicates
        targets = sorted(vocab.keys())[:3]
        targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        outputs = fill_masker(f"My name is {tokenizer.mask_token}", targets=targets, top_k=10)

        # The target list contains duplicates, so we can't output more
        # than them
        self.assertEqual(len(outputs), 3)

    def fill_mask_with_multiple_masks(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}", top_k=2
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )
| 349 |
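For reference, the result shape the assertions above rely on: a fill-mask pipeline returns a list of dicts sorted by score (or a list of such lists when there are several masks). Model name and scores below are illustrative.

from transformers import pipeline

unmasker = pipeline("fill-mask", model="distilroberta-base")
for pred in unmasker("The capital of France is <mask>.", top_k=2):
    print(pred["token_str"], round(pred["score"], 3))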
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_layoutlmv3': [
'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LayoutLMv3Config',
'LayoutLMv3OnnxConfig',
],
'processing_layoutlmv3': ['LayoutLMv3Processor'],
'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_layoutlmv3_fast'] = ['LayoutLMv3TokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_layoutlmv3'] = [
'LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv3ForQuestionAnswering',
'LayoutLMv3ForSequenceClassification',
'LayoutLMv3ForTokenClassification',
'LayoutLMv3Model',
'LayoutLMv3PreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_layoutlmv3'] = [
'TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLayoutLMv3ForQuestionAnswering',
'TFLayoutLMv3ForSequenceClassification',
'TFLayoutLMv3ForTokenClassification',
'TFLayoutLMv3Model',
'TFLayoutLMv3PreTrainedModel',
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_layoutlmv3'] = ['LayoutLMv3FeatureExtractor']
    _import_structure['image_processing_layoutlmv3'] = ['LayoutLMv3ImageProcessor']
if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 349 | 1 |
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple

import numpy as np

from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy


if is_flax_available():
    import jax.numpy as jnp


class cached_property(property):
    """Descriptor that mimics @property but caches output in a member variable."""

    def __get__(self, obj, objtype=None):
        # See docs.python.org/3/howto/descriptor.html#properties
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached


def strtobool(val):
    """Convert a string representation of truth to 1 (true) or 0 (false)."""
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f"invalid truth value {val!r}")


def is_tensor(x):
    """Tests if `x` is a `torch.Tensor`, `tf.Tensor`, jax tensor or `np.ndarray`."""
    if is_torch_fx_proxy(x):
        return True
    if is_torch_available():
        import torch

        if isinstance(x, torch.Tensor):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(x, tf.Tensor):
            return True

    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(x, (jnp.ndarray, Tracer)):
            return True

    return isinstance(x, np.ndarray)


def _is_numpy(x):
    return isinstance(x, np.ndarray)


def is_numpy_array(x):
    """Tests if `x` is a numpy array or not."""
    return _is_numpy(x)


def _is_torch(x):
    import torch

    return isinstance(x, torch.Tensor)


def is_torch_tensor(x):
    """Tests if `x` is a torch tensor. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch(x)


def _is_torch_device(x):
    import torch

    return isinstance(x, torch.device)


def is_torch_device(x):
    """Tests if `x` is a torch device. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch_device(x)


def _is_torch_dtype(x):
    import torch

    if isinstance(x, str):
        if hasattr(torch, x):
            x = getattr(torch, x)
        else:
            return False
    return isinstance(x, torch.dtype)


def is_torch_dtype(x):
    """Tests if `x` is a torch dtype. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch_dtype(x)


def _is_tensorflow(x):
    import tensorflow as tf

    return isinstance(x, tf.Tensor)


def is_tf_tensor(x):
    """Tests if `x` is a tensorflow tensor. Safe to call even if tensorflow is not installed."""
    return False if not is_tf_available() else _is_tensorflow(x)


def _is_tf_symbolic_tensor(x):
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf, "is_symbolic_tensor"):
        return tf.is_symbolic_tensor(x)
    return type(x) == tf.Tensor


def is_tf_symbolic_tensor(x):
    """Tests if `x` is a tensorflow symbolic tensor (i.e. not eager). Safe to call without tensorflow installed."""
    return False if not is_tf_available() else _is_tf_symbolic_tensor(x)


def _is_jax(x):
    import jax.numpy as jnp  # noqa: F811

    return isinstance(x, jnp.ndarray)


def is_jax_tensor(x):
    """Tests if `x` is a Jax tensor. Safe to call even if jax is not installed."""
    return False if not is_flax_available() else _is_jax(x)


def to_py_obj(obj):
    """Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a python list."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_py_obj(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [to_py_obj(o) for o in obj]
    elif is_tf_tensor(obj):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj):
        return np.asarray(obj).tolist()
    elif isinstance(obj, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj


def to_numpy(obj):
    """Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a Numpy array."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return np.array(obj)
    elif is_tf_tensor(obj):
        return obj.numpy()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj):
        return np.asarray(obj)
    else:
        return obj


class ModelOutput(OrderedDict):
    """Base class for all model outputs as dataclass, with dict-like and tuple-like access."""

    def __post_init__(self):
        class_fields = fields(self)

        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f"{self.__class__.__name__} has no fields.")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")

        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])

        if other_fields_are_none and not is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False

            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator):
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value)."
                            )
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
        elif first_field is not None:
            self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v

    def __delitem__(self, *args, **kwargs):
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")

    def setdefault(self, *args, **kwargs):
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")

    def pop(self, *args, **kwargs):
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")

    def update(self, *args, **kwargs):
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")

    def __getitem__(self, k):
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__(self, name, value):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)

    def __setitem__(self, key, value):
        # Will raise a KeyException if needed
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)

    def to_tuple(self) -> Tuple[Any]:
        """Convert self to a tuple containing all the attributes/keys that are not `None`."""
        return tuple(self[k] for k in self.keys())


class ExplicitEnum(str, Enum):
    """Enum with more explicit error message for missing values."""

    @classmethod
    def _missing_(cls, value):
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}"
        )


class PaddingStrategy(ExplicitEnum):
    """Possible values for the `padding` argument of tokenizers."""

    LONGEST = "longest"
    MAX_LENGTH = "max_length"
    DO_NOT_PAD = "do_not_pad"


class TensorType(ExplicitEnum):
    """Possible values for the `return_tensors` argument of tokenizers and processors."""

    PYTORCH = "pt"
    TENSORFLOW = "tf"
    NUMPY = "np"
    JAX = "jax"


class ContextManagers:
    """Wrapper for `contextlib.ExitStack` which enters a collection of context managers."""

    def __init__(self, context_managers: List[ContextManager]):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)


def can_return_loss(model_class):
    """Check if a given model can return loss."""
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True

    return False


def find_labels(model_class):
    """Find the labels used by a given model."""
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]


def flatten_dict(d: MutableMapping, parent_key: str = "", delimiter: str = "."):
    """Flatten a nested dict into a single level dict."""

    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                yield from flatten_dict(v, key, delimiter=delimiter).items()
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))


@contextmanager
def working_or_temp_dir(working_dir, use_temp_dir: bool = False):
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir


def transpose(array, axes=None):
    """Framework-agnostic version of `numpy.transpose` for torch/TensorFlow/Jax tensors and NumPy arrays."""
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(f"Type not supported for transpose: {type(array)}.")


def reshape(array, newshape):
    """Framework-agnostic version of `numpy.reshape`."""
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(f"Type not supported for reshape: {type(array)}.")


def squeeze(array, axis=None):
    """Framework-agnostic version of `numpy.squeeze`."""
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for squeeze: {type(array)}.")


def expand_dims(array, axis):
    """Framework-agnostic version of `numpy.expand_dims`."""
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for expand_dims: {type(array)}.")


def tensor_size(array):
    """Framework-agnostic version of `numpy.size`."""
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(f"Type not supported for tensor_size: {type(array)}.")


def add_model_info_to_auto_map(auto_map, repo_id):
    """Adds the information of the repo_id to a given auto map."""
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"{repo_id}--{value}"

    return auto_map


def infer_framework(model_class):
    """Infers the framework of a given model class by inspecting its base classes."""
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch") or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(f"Could not infer framework from class {model_class}.")
| 319 |
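A sketch of how ModelOutput subclasses behave, assuming the dataclass-based pattern used throughout transformers: None fields are dropped from the mapping, and values are reachable by key, attribute, or position.

from dataclasses import dataclass
from typing import Optional

import numpy as np

from transformers.utils import ModelOutput


@dataclass
class ToyOutput(ModelOutput):
    logits: Optional[np.ndarray] = None
    hidden_states: Optional[np.ndarray] = None


out = ToyOutput(logits=np.zeros((1, 2)))
assert out["logits"] is out.logits is out[0]
assert "hidden_states" not in out  # None fields are dropped from the mapping
assert out.to_tuple()[0] is out.logits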
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
snake_case = get_logger(__name__)
snake_case = r"""
Args:
input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
search or log softmax for each vocabulary token when using beam search
kwargs (`Dict[str, Any]`, *optional*):
Additional logits processor specific kwargs.
Return:
`jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
"""
class FlaxLogitsProcessor:
    """Abstract base class for all logit processors that can be applied during generation."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        """Flax method for processing logits."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsWarper:
    """Abstract base class for all logit warpers that can be applied during generation with multinomial sampling."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        """Flax method for warping logits."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )
class FlaxLogitsProcessorList(list):
    """
    This class can be used to create a list of [`FlaxLogitsProcessor`] or [`FlaxLogitsWarper`] to subsequently
    process a `scores` input tensor. It inherits from list and applies each processor in order via `__call__`.
    """

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int, **kwargs) -> jnp.ndarray:
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            if len(function_args) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        f"Make sure that all the required parameters: {list(function_args.keys())} for "
                        f"{processor.__class__} are passed to the logits processor."
                    )
                scores = processor(input_ids, scores, cur_len, **kwargs)
            else:
                scores = processor(input_ids, scores, cur_len)
        return scores
class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
    """[`FlaxLogitsWarper`] for temperature (exponential scaling of the output probability distribution)."""

    def __init__(self, temperature: float):
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")
        self.temperature = temperature

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores / self.temperature
        return scores
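# Illustrative sketch (added): temperature rescales logits before softmax; values
# below 1.0 sharpen the distribution, values above 1.0 flatten it. For example:
#
#     scores = jnp.array([[1.0, 2.0]])
#     FlaxTemperatureLogitsWarper(0.5)(None, scores, cur_len=1)
#     # -> [[2.0, 4.0]]: the logit gap doubles, so softmax puts more mass on token 1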
class FlaxTopPLogitsWarper(FlaxLogitsWarper):
    """[`FlaxLogitsWarper`] that performs top-p (nucleus) filtering: restricts sampling to the smallest set of the
    most probable tokens whose cumulative probability reaches `top_p`."""

    def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")

        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])

        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p

        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)

        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)

        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]

        return next_scores
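# Illustrative sketch (added): with sorted probabilities [0.5, 0.3, 0.15, 0.05] and
# top_p=0.8, the cumulative mass reaches 0.8 at the second token, so only the two
# most likely tokens keep their scores and the rest are set to `filter_value`:
#
#     scores = jnp.log(jnp.array([[0.5, 0.3, 0.15, 0.05]]))
#     FlaxTopPLogitsWarper(top_p=0.8)(None, scores, cur_len=1)
#     # tokens 0 and 1 survive; tokens 2 and 3 become -inf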
class FlaxTopKLogitsWarper(FlaxLogitsWarper):
    """[`FlaxLogitsWarper`] that performs top-k filtering: restricts sampling to the k highest-probability tokens."""

    def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")

        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        batch_size, vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)

        topk = min(self.top_k, scores.shape[-1])  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk)
        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift

        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
        next_scores = next_scores_flat.reshape(batch_size, vocab_size)
        return next_scores
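# Illustrative sketch (added): top-k keeps only the k highest-scoring tokens per row
# and masks everything else with `filter_value`:
#
#     scores = jnp.array([[0.1, 0.4, 0.2, 0.3]])
#     FlaxTopKLogitsWarper(top_k=2)(None, scores, cur_len=1)
#     # indices 1 and 3 keep 0.4 and 0.3; indices 0 and 2 become -inf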
class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] that enforces the specified token as the first generated token."""

    def __init__(self, bos_token_id: int):
        self.bos_token_id = bos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)

        return scores
class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] that enforces the specified token as the last generated token when `max_length` is
    reached."""

    def __init__(self, max_length: int, eos_token_id: int):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)

        return scores
class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] enforcing a min-length by setting the EOS probability to 0."""

    def __init__(self, min_length: int, eos_token_id: int):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")
        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")

        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)

        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores)

        return scores
class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] suppressing a list of tokens as soon as the `generate` function starts generating,
    i.e. once `cur_len` reaches `begin_index`."""

    def __init__(self, begin_suppress_tokens, begin_index):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)

        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores)

        return scores
class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] suppressing a list of tokens at every decoding step."""

    def __init__(self, suppress_tokens: list):
        self.suppress_tokens = list(suppress_tokens)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores.at[..., self.suppress_tokens].set(-float("inf"))

        return scores
class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] that takes a mapping from generation indices to token ids and forces those tokens at
    those positions by setting their log probs to 0 and all other tokens to `-inf`."""

    def __init__(self, force_token_map):
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token)
        self.force_token_array = jnp.int32(force_token_array)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        def _force_token(generation_idx):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]

            new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf")
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
            return new_scores

        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0],
            # If the current length is geq than the length of force_token_array, the processor does nothing.
            lambda: scores,
            # Otherwise, it may force a certain token.
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0,
                # Only valid (positive) tokens are forced
                lambda: _force_token(cur_len),
                # Otherwise, the processor does nothing.
                lambda: scores,
            ),
        )
        return scores
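# Illustrative sketch (added; the token ids below are made up for the example):
# Whisper-style generation forces fixed tokens at fixed positions, e.g.
# {1: language_token, 2: task_token}. Internally this becomes the XLA-friendly
# array [-1, language_token, task_token]; a -1 entry means "nothing is forced
# at this step", so sampling proceeds normally there.
#
#     processor = FlaxForceTokensLogitsProcessor({1: 50_259, 2: 50_359})
#     processor.force_token_array  # -> Array([-1, 50259, 50359], dtype=int32)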
class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] modifying the logits for timestamp generation in Whisper: timestamps come in pairs,
    an initial-timestamp cap can be applied, and a timestamp is forced whenever its total probability exceeds that of
    any single text token."""

    def __init__(self, generate_config, model_config, decoder_input_length):
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1
        self.begin_index = decoder_input_length + 1

        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, "max_initial_timestamp_index"):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size

    def __call__(self, input_ids, scores, cur_len):
        # suppress <|notimestamps|> which is handled by without_timestamps
        scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf"))

        def handle_pairs(input_ids_k, scores_k):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin,
                True and last_was_timestamp,
                False,
            )

            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin,
                True,
                penultimate_was_timestamp,
            )

            return jnp.where(
                last_was_timestamp,
                jnp.where(
                    penultimate_was_timestamp > 0,
                    scores_k.at[self.timestamp_begin :].set(-float("inf")),
                    scores_k.at[: self.eos_token_id].set(-float("inf")),
                ),
                scores_k,
            )

        scores = jax.vmap(handle_pairs)(input_ids, scores)

        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None,
            True and apply_max_initial_timestamp,
            False,
        )

        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index
        scores = jnp.where(
            apply_max_initial_timestamp,
            scores.at[:, last_allowed + 1 :].set(-float("inf")),
            scores,
        )

        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)

        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob,
                scores_k.at[: self.timestamp_begin].set(-float("inf")),
                scores_k,
            )

        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)

        return scores
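# Illustrative usage (added sketch, not part of the original file): processors
# compose through FlaxLogitsProcessorList, which applies them in order at every
# decoding step:
#
#     processors = FlaxLogitsProcessorList(
#         [
#             FlaxMinLengthLogitsProcessor(min_length=5, eos_token_id=2),
#             FlaxTemperatureLogitsWarper(0.7),
#             FlaxTopKLogitsWarper(top_k=50),
#         ]
#     )
#     scores = processors(input_ids, scores, cur_len=cur_len)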
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"

SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class BertGenerationTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 1_002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_000)

    def test_full_tokenizer(self):
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382], )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    @cached_property
    def big_tokenizer(self):
        return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [18_536, 2_260, 101]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
871,
419,
358,
946,
991,
2_521,
452,
358,
1_357,
387,
7_751,
3_536,
112,
985,
456,
126,
865,
938,
5_400,
5_734,
458,
1_368,
467,
786,
2_462,
5_246,
1_159,
633,
865,
4_519,
457,
582,
852,
2_557,
427,
916,
508,
405,
34_324,
497,
391,
408,
11_342,
1_244,
385,
100,
938,
985,
456,
574,
362,
12_597,
3_200,
3_129,
1_172,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BertGenerationConfig, BertGenerationEncoder

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )

        config = BertGenerationConfig()
        model = BertGenerationEncoder(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
lowerCAmelCase_ = {"input_ids": [[39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114], [448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        expected_encoding = lowerCAmelCase_  # the fmt: off dictionary defined above
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/bert_for_seq_generation_L-24_bbc_encoder",
            revision="c817d1fd1be2ffa69431227a1fe320544943d4db",
        )
"""simple docstring"""
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def _SCREAMING_SNAKE_CASE ( _lowercase : np.ndarray , _lowercase : np.ndarray ) ->float:
'''simple docstring'''
return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(_lowercase , _lowercase ) ) )
def _SCREAMING_SNAKE_CASE ( _lowercase : np.ndarray , _lowercase : np.ndarray ) ->list[list[list[float] | float]]:
'''simple docstring'''
if dataset.ndim != value_array.ndim:
a : str = (
"Wrong input data's dimensions... "
F"""dataset : {dataset.ndim}, value_array : {value_array.ndim}"""
)
raise ValueError(_lowercase )
try:
if dataset.shape[1] != value_array.shape[1]:
a : int = (
"Wrong input data's shape... "
F"""dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"""
)
raise ValueError(_lowercase )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError("Wrong shape" )
if dataset.dtype != value_array.dtype:
a : Union[str, Any] = (
"Input data have different datatype... "
F"""dataset : {dataset.dtype}, value_array : {value_array.dtype}"""
)
raise TypeError(_lowercase )
a : str = []
for value in value_array:
a : List[Any] = euclidean(_lowercase , dataset[0] )
a : str = dataset[0].tolist()
for dataset_value in dataset[1:]:
a : Tuple = euclidean(_lowercase , _lowercase )
if dist > temp_dist:
a : Dict = temp_dist
a : Optional[int] = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def _SCREAMING_SNAKE_CASE ( _lowercase : np.ndarray , _lowercase : np.ndarray ) ->float:
'''simple docstring'''
return np.dot(_lowercase , _lowercase ) / (norm(_lowercase ) * norm(_lowercase ))
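# Illustrative usage (added sketch):
#
#     dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
#     value_array = np.array([[0.0, 0.1]])
#     similarity_search(dataset, value_array)
#     # -> [[[0.0, 0.0], 0.1]]  (nearest dataset vector and its euclidean distance)
#
#     cosine_similarity(np.array([1.0, 1.0]), np.array([1.0, 1.0]))  # -> 1.0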
if __name__ == "__main__":
import doctest
doctest.testmod()
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMvaImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
UpperCAmelCase_ = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
        expected_words = UpperCAmelCase_  # capture the words list before the boxes assignment below reuses the name
UpperCAmelCase_ = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], 
[576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
        expected_boxes = UpperCAmelCase_
# fmt: on
        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)
        # with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizer(PreTrainedTokenizer):
    """
    Construct an XLNet tokenizer, based on [SentencePiece](https://github.com/google/sentencepiece).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequences by concatenating and adding special tokens. An
        XLNet sequence has the following format:

        - single sequence: `X <sep> <cls>`
        - pair of sequences: `A <sep> B <sep> <cls>`
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
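# Illustrative usage (added sketch; requires the "xlnet-base-cased" checkpoint):
# XLNet appends its special tokens at the END of the sequence (`X <sep> <cls>` /
# `A <sep> B <sep> <cls>`) and pads on the left, unlike BERT-style tokenizers:
#
#     tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
#     tokenizer.build_inputs_with_special_tokens([10, 11])
#     # -> [10, 11, sep_token_id, cls_token_id]
#     tokenizer.create_token_type_ids_from_sequences([10, 11], [12])
#     # -> [0, 0, 0, 1, 1, 2]  (segment ids; 2 marks the <cls> segment)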
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetEmbeddings(nn.Module):
    """
    ResNet Embeddings (stem) composed of a single aggressive convolution.
    """

    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding
class ResNetShortCut(nn.Module):
    """
    ResNet shortcut, used to project the residual features to the correct size. If needed, it is also used to
    downsample the input using `stride=2`.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class ResNetBasicLayer(nn.Module):
    """
    A classic ResNet residual layer composed of two `3x3` convolutions.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
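# Illustrative sketch (added): a basic layer preserves the spatial size when
# stride=1 and the channel counts match; with stride=2 it halves the feature map
# and routes the residual through a projection shortcut:
#
#     layer = ResNetBasicLayer(in_channels=64, out_channels=128, stride=2)
#     layer(torch.randn(1, 64, 56, 56)).shape  # -> torch.Size([1, 128, 28, 28])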
class ResNetBottleNeckLayer(nn.Module):
    """
    A classic ResNet bottleneck layer composed of three convolutions. The first `1x1` convolution reduces the input
    channels by a factor of `reduction` to make the middle `3x3` convolution cheaper; the last `1x1` convolution
    remaps the reduced features to `out_channels`.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetStage(nn.Module):
    """
    A ResNet stage composed of stacked layers.
    """

    def __init__(self, config: ResNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class ResNetEncoder(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)

            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )
class ResNetPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value
RESNET_START_DOCSTRING = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
RESNET_INPUTS_DOCSTRING = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.",
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]

        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
@add_start_docstrings(
'''
ResNet backbone, to be used with frameworks like DETR and MaskFormer.
''' , A__ , )
class __a ( A__ , A__ ):
def __init__( self : List[str] , SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
super().__init__(SCREAMING_SNAKE_CASE )
super()._init_backbone(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Union[str, Any] = [config.embedding_size] + config.hidden_sizes
UpperCamelCase__ : str = ResNetEmbeddings(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[str] = ResNetEncoder(SCREAMING_SNAKE_CASE )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE )
@replace_return_docstrings(output_type=SCREAMING_SNAKE_CASE , config_class=_CONFIG_FOR_DOC )
def __lowercase ( self : Optional[Any] , SCREAMING_SNAKE_CASE : Tensor , SCREAMING_SNAKE_CASE : Optional[bool] = None , SCREAMING_SNAKE_CASE : Optional[bool] = None ):
'''simple docstring'''
UpperCamelCase__ : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
UpperCamelCase__ : Tuple = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCamelCase__ : int = self.embedder(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Union[str, Any] = self.encoder(SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE , return_dict=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[str] = outputs.hidden_states
UpperCamelCase__ : int = ()
for idx, stage in enumerate(self.stage_names ):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
UpperCamelCase__ : List[str] = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=SCREAMING_SNAKE_CASE , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=SCREAMING_SNAKE_CASE , ) | 189 |
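# A minimal usage sketch for the classification model above. This is an illustration
# only: it assumes the standard `transformers` API and uses a randomly initialised
# model instead of a real checkpoint, so the predicted label is meaningless.
import torch
from transformers import ResNetConfig, ResNetForImageClassification

config = ResNetConfig(num_labels=10)
model = ResNetForImageClassification(config)
model.eval()

pixel_values = torch.randn(1, config.num_channels, 224, 224)  # one fake RGB image
with torch.no_grad():
    logits = model(pixel_values).logits  # shape: (1, num_labels)
print(logits.argmax(-1))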
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available


_import_structure = {
    "configuration_audio_spectrogram_transformer": [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ASTConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ASTForAudioClassification",
        "ASTModel",
        "ASTPreTrainedModel",
    ]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ["ASTFeatureExtractor"]

if TYPE_CHECKING:
    from .configuration_audio_spectrogram_transformer import (
        AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ASTConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_audio_spectrogram_transformer import (
            AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ASTForAudioClassification,
            ASTModel,
            ASTPreTrainedModel,
        )

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
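# The `_LazyModule` pattern above defers the heavy torch-backed imports until a name is
# first accessed. A sketch of the consumer-side effect (the names are the ones declared
# in `_import_structure` above; the timing behaviour is the point):
#
#   from transformers.models.audio_spectrogram_transformer import ASTConfig  # cheap, config only
#   from transformers.models.audio_spectrogram_transformer import ASTModel   # triggers the torch import
#
# Static type checkers take the eager `if TYPE_CHECKING:` branch instead, so they see the
# same names without executing the lazy machinery.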
import os


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = "."


if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
import gc
import unittest

import numpy as np
import torch

from diffusers import (
    AudioDiffusionPipeline,
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    DiffusionPipeline,
    Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu


enable_full_determinism()


class PipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            sample_size=(32, 64), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("AttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "AttnUpBlock2D"), )
        return model

    @property
    def dummy_unet_condition(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), cross_attention_dim=10, )
        return model

    @property
    def dummy_vqvae_and_unet(self):
        torch.manual_seed(0)
        vqvae = AutoencoderKL(
            sample_size=(128, 64), in_channels=1, out_channels=1, latent_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"), up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"), )
        unet = UNet2DModel(
            sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("AttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "AttnUpBlock2D"), )
        return vqvae, unet

    @slow
    def test_audio_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1], y_res=self.dummy_unet.config.sample_size[0], )
        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4)
        audio = output.audios[0]
        image = output.images[0]

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4, return_dict=False)
        image_from_tuple = output[0][0]

        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0

        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1], y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0], )
        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        raw_audio = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,))
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=10)
        image = output.images[0]

        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0

        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_unet_condition, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        encoding = torch.rand((1, 1, 10))
        output = pipe(generator=generator, encoding=encoding)
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0


@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_audio_diffusion(self):
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator)
        audio = output.audios[0]
        image = output.images[0]

        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]

        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
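# A minimal inference sketch mirroring the integration test above. Illustrative only:
# it downloads the same community checkpoint the test uses and runs it on CPU, which
# is slow but needs no GPU.
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
generator = torch.Generator(device="cpu").manual_seed(42)
output = pipe(generator=generator)
audio, image = output.audios[0], output.images[0]  # waveform plus its mel spectrogram image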
import os
import pickle
import unittest

from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
    VOCAB_FILES_NAMES,
    BertJapaneseTokenizer,
    CharacterTokenizer,
    JumanppTokenizer,
    MecabTokenizer,
    SudachiTokenizer,
    WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi

from ...test_tokenization_common import TokenizerTesterMixin


@custom_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "こんにちは",
            "こん",
            "にちは",
            "ばんは",
            "##こん",
            "##にちは",
            "##ばんは",
            "世界",
            "##世界",
            "、",
            "##、",
            "。",
            "##。",
        ]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こんにちは 、 世界 。 こんばんは 、 世界 。"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。")
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

    def test_pickle_mecab_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="mecab")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)

        self.assertListEqual(tokens, tokens_loaded)

    def test_mecab_tokenizer_ipadic(self):
        tokenizer = MecabTokenizer(mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_unidic_lite(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic_lite")
        except ModuleNotFoundError:
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_unidic(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic")
        except ModuleNotFoundError:
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_lower(self):
        tokenizer = MecabTokenizer(do_lower_case=True, mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_with_option(self):
        try:
            tokenizer = MecabTokenizer(
                do_lower_case=True, normalize_text=False, mecab_option="-d /usr/local/lib/mecab/dic/jumandic"
            )
        except RuntimeError:
            # if dict doesn't exist in the system, previous code raises this error.
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"],
        )

    def test_mecab_tokenizer_no_normalize(self):
        tokenizer = MecabTokenizer(normalize_text=False, mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"],
        )

    @require_sudachi
    def test_pickle_sudachi_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="sudachi")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)

        self.assertListEqual(tokens, tokens_loaded)

    @require_sudachi
    def test_sudachi_tokenizer_core(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "],
        )

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_A(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="A")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国", "人", "参政", "権"])

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_B(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="B")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人", "参政権"])

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_C(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="C")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人参政権"])

    @require_sudachi
    def test_sudachi_tokenizer_lower(self):
        tokenizer = SudachiTokenizer(do_lower_case=True, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "],
        )

    @require_sudachi
    def test_sudachi_tokenizer_no_normalize(self):
        tokenizer = SudachiTokenizer(normalize_text=False, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "],
        )

    @require_sudachi
    def test_sudachi_tokenizer_trim_whitespace(self):
        tokenizer = SudachiTokenizer(trim_whitespace=True, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    @require_jumanpp
    def test_pickle_jumanpp_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="jumanpp")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)

        self.assertListEqual(tokens, tokens_loaded)

    @require_jumanpp
    def test_jumanpp_tokenizer(self):
        tokenizer = JumanppTokenizer()

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_lower(self):
        tokenizer = JumanppTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_no_normalize(self):
        tokenizer = JumanppTokenizer(normalize_text=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_trim_whitespace(self):
        tokenizer = JumanppTokenizer(trim_whitespace=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_ext(self):
        tokenizer = JumanppTokenizer()

        self.assertListEqual(
            tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。"),
            ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"],
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こんにちは"])

        self.assertListEqual(tokenizer.tokenize("こんばんは"), ["こん", "##ばんは"])

        self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは"), ["こん", "##ばんは", "[UNK]", "こんにちは"])

    def test_sentencepiece_tokenizer(self):
        tokenizer = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp")
        subword_tokenizer = tokenizer.subword_tokenizer

        tokens = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。")
        self.assertListEqual(tokens, ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"])

        tokens = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは")
        self.assertListEqual(tokens, ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"])

    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]


@custom_tokenizers
class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, subword_tokenizer_type="character", **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
        return input_text, output_text

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, subword_tokenizer_type="character")

        tokens = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。")
        self.assertListEqual(
            tokens, ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"]
        )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12]
        )

    def test_character_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = CharacterTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こ", "ん", "に", "ち", "は"])

        self.assertListEqual(tokenizer.tokenize("こんにちほ"), ["こ", "ん", "に", "ち", "[UNK]"])

    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]


@custom_tokenizers
class AutoTokenizerCustomTest(unittest.TestCase):
    def test_tokenizer_bert_japanese(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        tokenizer = AutoTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
        self.assertIsInstance(tokenizer, BertJapaneseTokenizer)


class BertTokenizerMismatchTest(unittest.TestCase):
    def test_tokenizer_mismatch_warning(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
        EXAMPLE_BERT_ID = "bert-base-cased"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertJapaneseTokenizer.from_pretrained(EXAMPLE_BERT_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
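# A minimal usage sketch for the tokenizer exercised above. It assumes the MeCab backend
# is installed (for example via `pip install fugashi ipadic`); the checkpoint name is the
# real one already used by the tests.
from transformers import BertJapaneseTokenizer

tokenizer = BertJapaneseTokenizer.from_pretrained("cl-tohoku/bert-base-japanese")
print(tokenizer.tokenize("こんにちは、世界。"))  # MeCab word split, then WordPiece
print(tokenizer("こんにちは、世界。")["input_ids"])  # [CLS] ... [SEP] ids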
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image

from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs

img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]

    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)

    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
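# The LBP test above thresholds each 3x3 neighbourhood against its centre pixel and packs
# the comparisons into one byte per pixel. A hand-computed sketch of that idea, kept
# independent of the project's helpers, so the neighbour weighting order is illustrative:
import numpy as np

window = np.array([[6, 5, 2], [7, 6, 1], [9, 8, 7]])
center = window[1, 1]
bits = (window >= center).astype(int).flatten()
bits = np.delete(bits, 4)  # drop the centre pixel itself
lbp_value = int((bits * 2 ** np.arange(8)).sum())  # one possible neighbour ordering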
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}


class XLNetConfig(PretrainedConfig):
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, vocab_size=32000, d_model=1024, n_layer=24, n_head=16, d_inner=4096, ff_activation="gelu", untie_r=True, attn_type="bi", initializer_range=0.02, layer_norm_eps=1e-12, dropout=0.1, mem_len=512, reuse_len=None, use_mems_eval=True, use_mems_train=False, bi_data=False, clamp_len=-1, same_length=False, summary_type="last", summary_use_proj=True, summary_activation="tanh", summary_last_dropout=0.1, start_n_top=5, end_n_top=5, pad_token_id=5, bos_token_id=1, eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type

        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length

        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top

        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
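# A minimal usage sketch for the config above. The overridden property means asking for
# a maximum length reports "no limit" via -1 rather than a positional-embedding bound.
from transformers import XLNetConfig

config = XLNetConfig(vocab_size=32000, d_model=1024, n_layer=24, n_head=16)
assert config.d_head == config.d_model // config.n_head
print(config.max_position_embeddings)  # -1: XLNet has no hard sequence-length limit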
import argparse
import logging
import sys
from unittest.mock import patch

import run_glue_deebert

from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow


logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


class DeeBertTests(TestCasePlus):
    def setup(self) -> None:
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

    def run_and_check(self, args):
        n_gpu = get_gpu_count()

        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)

    @slow
    @require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = """
            --model_type roberta
            --model_name_or_path roberta-base
            --task_name MRPC
            --do_train
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --max_seq_length 128
            --per_gpu_eval_batch_size=1
            --per_gpu_train_batch_size=8
            --learning_rate 2e-4
            --num_train_epochs 3
            --overwrite_output_dir
            --seed 42
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --save_steps 0
            --overwrite_cache
            --eval_after_first_stage
            """.split()
        self.run_and_check(train_args)

        eval_args = """
            --model_type roberta
            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --task_name MRPC
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --max_seq_length 128
            --eval_each_highway
            --eval_highway
            --overwrite_cache
            --per_gpu_eval_batch_size=1
            """.split()
        self.run_and_check(eval_args)

        entropy_eval_args = """
            --model_type roberta
            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --task_name MRPC
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --max_seq_length 128
            --early_exit_entropy 0.1
            --eval_highway
            --overwrite_cache
            --per_gpu_eval_batch_size=1
            """.split()
        self.run_and_check(entropy_eval_args)
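# The test above drives the example script in-process by patching `sys.argv` rather than
# spawning a subprocess. The core pattern, reduced to a sketch:
#
#   testargs = ["run_glue_deebert.py", "--model_type", "roberta", "--task_name", "MRPC"]
#   with patch.object(sys, "argv", testargs):
#       result = run_glue_deebert.main()   # argparse inside main() sees testargs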
"""simple docstring"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class snake_case :
"""simple docstring"""
def __init__( self : Optional[Any] ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : List[str]=13 ,lowerCamelCase__ : str=32 ,lowerCamelCase__ : List[str]=2 ,lowerCamelCase__ : Tuple=3 ,lowerCamelCase__ : str=16 ,lowerCamelCase__ : List[Any]=[1, 2, 1] ,lowerCamelCase__ : int=[2, 2, 4] ,lowerCamelCase__ : Dict=2 ,lowerCamelCase__ : str=2.0 ,lowerCamelCase__ : Dict=True ,lowerCamelCase__ : List[str]=0.0 ,lowerCamelCase__ : List[Any]=0.0 ,lowerCamelCase__ : Union[str, Any]=0.1 ,lowerCamelCase__ : Tuple="gelu" ,lowerCamelCase__ : Tuple=False ,lowerCamelCase__ : Dict=True ,lowerCamelCase__ : int=0.0_2 ,lowerCamelCase__ : Tuple=1e-5 ,lowerCamelCase__ : str=True ,lowerCamelCase__ : List[str]=None ,lowerCamelCase__ : List[Any]=True ,lowerCamelCase__ : int=10 ,lowerCamelCase__ : Tuple=8 ,lowerCamelCase__ : List[Any]=["stage1", "stage2", "stage3"] ,lowerCamelCase__ : str=[1, 2, 3] ,):
UpperCAmelCase__ = parent
UpperCAmelCase__ = batch_size
UpperCAmelCase__ = image_size
UpperCAmelCase__ = patch_size
UpperCAmelCase__ = num_channels
UpperCAmelCase__ = embed_dim
UpperCAmelCase__ = depths
UpperCAmelCase__ = num_heads
UpperCAmelCase__ = window_size
UpperCAmelCase__ = mlp_ratio
UpperCAmelCase__ = qkv_bias
UpperCAmelCase__ = hidden_dropout_prob
UpperCAmelCase__ = attention_probs_dropout_prob
UpperCAmelCase__ = drop_path_rate
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = use_absolute_embeddings
UpperCAmelCase__ = patch_norm
UpperCAmelCase__ = layer_norm_eps
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = is_training
UpperCAmelCase__ = scope
UpperCAmelCase__ = use_labels
UpperCAmelCase__ = type_sequence_label_size
UpperCAmelCase__ = encoder_stride
UpperCAmelCase__ = out_features
UpperCAmelCase__ = out_indices
def __lowerCAmelCase ( self : List[str] ):
UpperCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ = None
if self.use_labels:
UpperCAmelCase__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase__ = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self : List[Any] ):
return MaskFormerSwinConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,path_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,out_features=self.out_features ,out_indices=self.out_indices ,)
def __lowerCAmelCase ( self : str ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : Dict ):
UpperCAmelCase__ = MaskFormerSwinModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCAmelCase__ = model(_lowerCamelCase )
UpperCAmelCase__ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
UpperCAmelCase__ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) )
def __lowerCAmelCase ( self : List[str] ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : int ):
UpperCAmelCase__ = MaskFormerSwinBackbone(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCAmelCase__ = model(_lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) )
self.parent.assertListEqual(model.channels ,[16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(_lowerCamelCase ):
UpperCAmelCase__ = ["""stem"""]
UpperCAmelCase__ = MaskFormerSwinBackbone(config=_lowerCamelCase )
def __lowerCAmelCase ( self : Dict ):
UpperCAmelCase__ = self.prepare_config_and_inputs()
UpperCAmelCase__ = config_and_inputs
UpperCAmelCase__ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class snake_case ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
snake_case__ = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
snake_case__ = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
snake_case__ = False
snake_case__ = False
snake_case__ = False
snake_case__ = False
snake_case__ = False
def __lowerCAmelCase ( self : List[Any] ):
UpperCAmelCase__ = MaskFormerSwinModelTester(self )
UpperCAmelCase__ = ConfigTester(self ,config_class=_lowerCamelCase ,embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
'`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'
' `nn.DataParallel`'
) )
def __lowerCAmelCase ( self : Dict ):
pass
def __lowerCAmelCase ( self : Union[str, Any] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCAmelCase ( self : str ):
return
def __lowerCAmelCase ( self : Dict ):
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def __lowerCAmelCase ( self : List[str] ):
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_lowerCamelCase )
@unittest.skip('Swin does not use inputs_embeds' )
def __lowerCAmelCase ( self : Any ):
pass
@unittest.skip('Swin does not support feedforward chunking' )
def __lowerCAmelCase ( self : Optional[int] ):
pass
def __lowerCAmelCase ( self : int ):
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ = model_class(_lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
UpperCAmelCase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCamelCase ,nn.Linear ) )
def __lowerCAmelCase ( self : Tuple ):
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ = model_class(_lowerCamelCase )
UpperCAmelCase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ = [*signature.parameters.keys()]
UpperCAmelCase__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,_lowerCamelCase )
@unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' )
def __lowerCAmelCase ( self : Any ):
pass
@unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' )
def __lowerCAmelCase ( self : int ):
pass
def __lowerCAmelCase ( self : Dict ,lowerCamelCase__ : int ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : Dict ):
UpperCAmelCase__ = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase__ = model(**self._prepare_for_class(_lowerCamelCase ,_lowerCamelCase ) )
UpperCAmelCase__ = outputs.hidden_states
UpperCAmelCase__ = getattr(
self.model_tester ,'expected_num_hidden_layers' ,len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_lowerCamelCase ) ,_lowerCamelCase )
# Swin has a different seq_length
UpperCAmelCase__ = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
def __lowerCAmelCase ( self : Optional[int] ):
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
UpperCAmelCase__ = True
self.check_hidden_states_output(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase__ = True
self.check_hidden_states_output(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
def __lowerCAmelCase ( self : Dict ):
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ = 3
UpperCAmelCase__ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
UpperCAmelCase__ = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase__ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
UpperCAmelCase__ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
UpperCAmelCase__ = True
self.check_hidden_states_output(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,(padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase__ = True
self.check_hidden_states_output(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,(padded_height, padded_width) )
@unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' )
def __lowerCAmelCase ( self : str ):
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def __lowerCAmelCase ( self : List[str] ):
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def __lowerCAmelCase ( self : Tuple ):
pass
def __lowerCAmelCase ( self : List[Any] ):
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(lowerCamelCase__ : int ):
UpperCAmelCase__ = 0
return t
def check_equivalence(lowerCamelCase__ : int ,lowerCamelCase__ : str ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : List[Any]={} ):
with torch.no_grad():
UpperCAmelCase__ = model(**_lowerCamelCase ,return_dict=_lowerCamelCase ,**_lowerCamelCase )
UpperCAmelCase__ = model(**_lowerCamelCase ,return_dict=_lowerCamelCase ,**_lowerCamelCase ).to_tuple()
def recursive_check(lowerCamelCase__ : str ,lowerCamelCase__ : Union[str, Any] ):
if isinstance(_lowerCamelCase ,(List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(_lowerCamelCase ,_lowerCamelCase ):
recursive_check(_lowerCamelCase ,_lowerCamelCase )
elif isinstance(_lowerCamelCase ,_lowerCamelCase ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() ,dict_object.values() ):
recursive_check(_lowerCamelCase ,_lowerCamelCase )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(_lowerCamelCase ) ,set_nan_tensor_to_zero(_lowerCamelCase ) ,atol=1e-5 ) ,msg=(
'Tuple and dict output are not equal. Difference:'
f''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:'''
f''' {torch.isnan(_lowerCamelCase ).any()} and `inf`: {torch.isinf(_lowerCamelCase )}. Dict has'''
f''' `nan`: {torch.isnan(_lowerCamelCase ).any()} and `inf`: {torch.isinf(_lowerCamelCase )}.'''
) ,)
recursive_check(_lowerCamelCase ,_lowerCamelCase )
for model_class in self.all_model_classes:
UpperCAmelCase__ = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCAmelCase__ = self._prepare_for_class(_lowerCamelCase ,_lowerCamelCase )
UpperCAmelCase__ = self._prepare_for_class(_lowerCamelCase ,_lowerCamelCase )
check_equivalence(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
UpperCAmelCase__ = self._prepare_for_class(_lowerCamelCase ,_lowerCamelCase ,return_labels=_lowerCamelCase )
UpperCAmelCase__ = self._prepare_for_class(_lowerCamelCase ,_lowerCamelCase ,return_labels=_lowerCamelCase )
check_equivalence(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
UpperCAmelCase__ = self._prepare_for_class(_lowerCamelCase ,_lowerCamelCase )
UpperCAmelCase__ = self._prepare_for_class(_lowerCamelCase ,_lowerCamelCase )
check_equivalence(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,{'output_hidden_states': True} )
UpperCAmelCase__ = self._prepare_for_class(_lowerCamelCase ,_lowerCamelCase ,return_labels=_lowerCamelCase )
UpperCAmelCase__ = self._prepare_for_class(_lowerCamelCase ,_lowerCamelCase ,return_labels=_lowerCamelCase )
check_equivalence(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,{'output_hidden_states': True} )
@require_torch
class snake_case ( unittest.TestCase , lowerCAmelCase__ ):
"""simple docstring"""
snake_case__ = (MaskFormerSwinBackbone,) if is_torch_available() else ()
snake_case__ = MaskFormerSwinConfig
def __lowerCAmelCase ( self : Any ):
UpperCAmelCase__ = MaskFormerSwinModelTester(self )
def __lowerCAmelCase ( self : int ):
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ = inputs_dict["""pixel_values"""].shape[0]
for backbone_class in self.all_model_classes:
UpperCAmelCase__ = backbone_class(_lowerCamelCase )
backbone.to(_lowerCamelCase )
backbone.eval()
UpperCAmelCase__ = backbone(**_lowerCamelCase )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps ,_lowerCamelCase )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps ,backbone.channels ):
self.assertTrue(feature_map.shape[:2] ,(batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
UpperCAmelCase__ = backbone(**_lowerCamelCase ,output_hidden_states=_lowerCamelCase )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ) ,len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] ,backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
UpperCAmelCase__ = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) ,(batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
UpperCAmelCase__ = backbone(**_lowerCamelCase ,output_attentions=_lowerCamelCase )
self.assertIsNotNone(outputs.attentions )
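# A minimal usage sketch for the backbone tested above: illustrative only, randomly
# initialised, and relying solely on the API surface the tests already exercise.
import torch
from transformers import MaskFormerSwinBackbone, MaskFormerSwinConfig

config = MaskFormerSwinConfig(out_features=["stage1", "stage2", "stage3"])
backbone = MaskFormerSwinBackbone(config)
backbone.eval()

pixel_values = torch.randn(1, config.num_channels, config.image_size, config.image_size)
with torch.no_grad():
    feature_maps = backbone(pixel_values).feature_maps  # one map per requested stage
print([fm.shape for fm in feature_maps])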
"""simple docstring"""
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class lowerCamelCase :
'''simple docstring'''
    def __init__(self , parent , vocab_size=99 , batch_size=13 , d_model=16 , decoder_seq_length=7 , is_training=True , is_decoder=True , use_attention_mask=True , use_cache=False , use_labels=True , decoder_start_token_id=2 , decoder_ffn_dim=32 , decoder_layers=4 , decoder_attention_heads=4 , max_position_embeddings=30 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.decoder_layers = decoder_layers
        self.num_hidden_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings
        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1

    def prepare_config_and_inputs(self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
        config = TrOCRConfig(
            vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
        return (config, input_ids, attention_mask, lm_labels)

    def create_and_check_decoder_model_past(self , config , input_ids , attention_mask , lm_labels ):
        """simple docstring"""
        config.use_cache = True
        model = TrOCRDecoder(config=config ).to(torch_device ).eval()
        input_ids = input_ids[:2]
        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids , use_cache=True )
        outputs_use_cache_conf = model(input_ids )
        outputs_no_past = model(input_ids , use_cache=False )
        self.parent.assertTrue(len(outputs ) == len(outputs_use_cache_conf ) )
        self.parent.assertTrue(len(outputs ) == len(outputs_no_past ) + 1 )
        past_key_values = outputs["""past_key_values"""]
        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
        # append to next input_ids
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        output_from_no_past = model(next_input_ids )["""last_hidden_state"""]
        output_from_past = model(next_tokens , past_key_values=past_key_values )["""last_hidden_state"""]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1e-3 )

    def prepare_config_and_inputs_for_common(self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
        return config, inputs_dict
@require_torch
class TrOCRStandaloneDecoderModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {'text-generation': TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False
    def setUp(self ):
        """simple docstring"""
        self.model_tester = TrOCRStandaloneDecoderModelTester(self , is_training=False )
        self.config_tester = ConfigTester(self , config_class=TrOCRConfig )

    def test_inputs_embeds(self ):
        """simple docstring"""
        pass

    def test_save_load_fast_init_from_base(self ):
        """simple docstring"""
        pass

    def test_save_load_fast_init_to_base(self ):
        """simple docstring"""
        pass

    def test_config(self ):
        """simple docstring"""
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs )

    def test_retain_grad_hidden_states_attentions(self ):
        """simple docstring"""
        # decoder cannot keep gradients
        return

    @unittest.skip("""The model doesn't support left padding""" )  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self ):
        """simple docstring"""
        pass
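# --- Illustrative sketch (separate from the test file above): the property the
# past-key-values test verifies. A cached step that is fed only the newest token
# must reproduce the corresponding position of a full, uncached forward pass.
# `model` stands for any decoder that accepts `use_cache`/`past_key_values`;
# the 1e-3 tolerance mirrors the test above and is otherwise an assumption.
import torch

def check_incremental_decoding(model, input_ids, next_token, atol=1e-3):
    with torch.no_grad():
        past = model(input_ids, use_cache=True).past_key_values
        full = model(torch.cat([input_ids, next_token], dim=-1)).last_hidden_state
        step = model(next_token, past_key_values=past).last_hidden_state
    # last position of the full pass vs. the single cached step
    return torch.allclose(full[:, -1], step[:, 0], atol=atol)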
| 171 | 0 |
"""simple docstring"""
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None , metadata={"""help""": """Model type selected in the list: """ + """, """.join(MODEL_TYPES )} )
    data_dir: str = field(
        default=None , metadata={"""help""": """The input data dir. Should contain the .json files for the SQuAD task."""} )
    max_seq_length: int = field(
        default=128 , metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } , )
    doc_stride: int = field(
        default=128 , metadata={"""help""": """When splitting up a long document into chunks, how much stride to take between chunks."""} , )
    max_query_length: int = field(
        default=64 , metadata={
            """help""": (
                """The maximum number of tokens for the question. Questions longer than this will """
                """be truncated to this length."""
            )
        } , )
    max_answer_length: int = field(
        default=30 , metadata={
            """help""": (
                """The maximum length of an answer that can be generated. This is needed because the start """
                """and end predictions are not conditioned on one another."""
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
    version_2_with_negative: bool = field(
        default=False , metadata={"""help""": """If true, the SQuAD examples contain some that do not have an answer."""} )
    null_score_diff_threshold: float = field(
        default=0.0 , metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""} )
    n_best_size: int = field(
        default=20 , metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""} )
    lang_id: int = field(
        default=0 , metadata={
            """help""": (
                """language id of input for language-specific xlm models (see"""
                """ tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"""
            )
        } , )
    threads: int = field(default=1 , metadata={"""help""": """multiple threads for converting example to features"""} )
class Split(Enum ):
    train = """train"""
    dev = """dev"""
class SquadDataset(Dataset ):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__( self , args , tokenizer , limit_length = None , mode = Split.train , is_language_sensitive = False , cache_dir = None , dataset_format = "pt" , ):
        '''simple docstring'''
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode , str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError('mode is not a valid split name')
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir , f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}' , )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get('dataset' , None)
                self.examples = self.old_features.get('examples' , None)
                logger.info(
                    f'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start)
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
                        ' future run')
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)
                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples , tokenizer=tokenizer , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=dataset_format , )
                start = time.time()
                torch.save(
                    {'features': self.features, 'dataset': self.dataset, 'examples': self.examples} , cached_features_file , )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]')
    def __len__( self) -> int:
        '''simple docstring'''
        return len(self.features)
    def __getitem__( self , i) -> Dict[str, torch.Tensor]:
        '''simple docstring'''
        # Convert to Tensors and build dataset
        feature = self.features[i]
        input_ids = torch.tensor(feature.input_ids , dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask , dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids , dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index , dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask , dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible , dtype=torch.float)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': attention_mask,
            'token_type_ids': token_type_ids,
        }
        if self.args.model_type in ['xlm', 'roberta', 'distilbert', 'camembert']:
            del inputs['token_type_ids']
        if self.args.model_type in ['xlnet', 'xlm']:
            inputs.update({'cls_index': cls_index, 'p_mask': p_mask})
            if self.args.version_2_with_negative:
                inputs.update({'is_impossible': is_impossible})
            if self.is_language_sensitive:
                inputs.update({'langs': (torch.ones(input_ids.shape , dtype=torch.int64) * self.args.lang_id)})
        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position , dtype=torch.long)
            end_positions = torch.tensor(feature.end_position , dtype=torch.long)
            inputs.update({'start_positions': start_positions, 'end_positions': end_positions})
        return inputs
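# --- Illustrative sketch (separate from the dataset class above): the generic
# "lock, then load-or-build" pattern its __init__ uses, with pickle standing in
# for torch.save/torch.load. Paths and the build function are hypothetical.
import os
import pickle
from filelock import FileLock

def load_or_build(cache_path, build_fn, overwrite=False):
    # serialize concurrent workers on the same cache file
    with FileLock(cache_path + '.lock'):
        if os.path.exists(cache_path) and not overwrite:
            with open(cache_path, 'rb') as f:
                return pickle.load(f)  # another process already built it
        data = build_fn()
        with open(cache_path, 'wb') as f:
            pickle.dump(data, f)
        return data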
| 369 |
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester:
    def __init__( self , parent , batch_size=13 , image_size=10 , num_channels=3 , patch_size=2 , tubelet_size=2 , num_frames=2 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , mask_ratio=0.9 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame
        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length)

    def prepare_config_and_inputs( self):
        '''simple docstring'''
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config( self):
        '''simple docstring'''
        return VideoMAEConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , )

    def create_and_check_model( self , config , pixel_values , labels):
        '''simple docstring'''
        model = VideoMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining( self , config , pixel_values , labels):
        '''simple docstring'''
        model = VideoMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        # important: each video needs to have the same number of masked patches
        # hence we define a single mask, which we then repeat for each example in the batch
        mask = torch.ones((self.num_masks,))
        mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0))])
        bool_masked_pos = mask.expand(self.batch_size , -1).bool()
        result = model(pixel_values , bool_masked_pos)
        # model only returns predictions for masked patches
        num_masked_patches = mask.sum().item()
        decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels))

    def prepare_config_and_inputs_for_common( self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class VideoMAEModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"""feature-extraction""": VideoMAEModel, """video-classification""": VideoMAEForVideoClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self):
        '''simple docstring'''
        self.model_tester = VideoMAEModelTester(self)
        self.config_tester = ConfigTester(self , config_class=VideoMAEConfig , has_text_modality=False , hidden_size=37)

    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False):
        '''simple docstring'''
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class == VideoMAEForPreTraining:
            # important: each video needs to have the same number of masked patches
            # hence we define a single mask, which we then repeat for each example in the batch
            mask = torch.ones((self.model_tester.num_masks,))
            mask = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0))])
            bool_masked_pos = mask.expand(self.model_tester.batch_size , -1).bool()
            inputs_dict['bool_masked_pos'] = bool_masked_pos.to(torch_device)
        if return_labels:
            if model_class in [
                *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict['labels'] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device)
        return inputs_dict

    def test_config( self):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    @unittest.skip(reason='VideoMAE does not use inputs_embeds')
    def test_inputs_embeds( self):
        '''simple docstring'''
        pass

    def test_model_common_attributes( self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear))

    def test_forward_signature( self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names)

    def test_model( self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining( self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    @slow
    def test_model_from_pretrained( self):
        '''simple docstring'''
        for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = VideoMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs( self):
        '''simple docstring'''
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True
            for model_class in self.all_model_classes:
                num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
                seq_len = (
                    num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
                )
                inputs_dict['output_attentions'] = True
                inputs_dict['output_hidden_states'] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict , model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions) , self.model_tester.num_hidden_layers)
                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict , model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions) , self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
                out_len = len(outputs)
                # Check attention is always last and order is fine
                inputs_dict['output_attentions'] = True
                inputs_dict['output_hidden_states'] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict , model_class))
                self.assertEqual(out_len + 1 , len(outputs))
                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions) , self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )

    def test_hidden_states_output( self):
        '''simple docstring'''
        def check_hidden_states_output(inputs_dict , config , model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states) , expected_num_layers)
            num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
            seq_length = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict , config , model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class)
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
pass
def prepare_video( ) ->List[Any]:
    file = hf_hub_download(
        repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' )
    video = np.load(file )
    return list(video )
@require_torch
@require_vision
class VideoMAEModelIntegrationTest( unittest.TestCase ):
@cached_property
    def default_image_processor( self):
        '''simple docstring'''
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification( self):
        '''simple docstring'''
        model = VideoMAEForVideoClassification.from_pretrained('MCG-NJU/videomae-base-finetuned-kinetics').to(
            torch_device)
        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video , return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 4_00))
        self.assertEqual(outputs.logits.shape , expected_shape)
        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4))

    @slow
    def test_inference_for_pretraining( self):
        '''simple docstring'''
        model = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short').to(torch_device)
        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video , return_tensors='pt').to(torch_device)
        # add boolean mask, indicating which patches to mask
        local_path = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' , filename='bool_masked_pos.pt')
        inputs['bool_masked_pos'] = torch.load(local_path)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size([1, 14_08, 15_36])
        expected_slice = torch.tensor(
            [[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] , device=torch_device)
        self.assertEqual(outputs.logits.shape , expected_shape)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice , atol=1e-4))
        # verify the loss (`config.norm_pix_loss` = `True`)
        expected_loss = torch.tensor([0.5142] , device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss , expected_loss , atol=1e-4))
        # verify the loss (`config.norm_pix_loss` = `False`)
        model = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' , norm_pix_loss=False).to(
            torch_device)
        with torch.no_grad():
            outputs = model(**inputs)
        expected_loss = torch.tensor([0.6469] , device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss , expected_loss , atol=1e-4))
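# --- Illustrative sketch (separate from the tests above): how a boolean mask
# with a fixed ratio of masked patches is built per batch, as the pretraining
# tests do. The `seq_length`/`mask_ratio` values are arbitrary examples.
import torch

def make_bool_masked_pos(batch_size, seq_length, mask_ratio):
    num_masks = int(mask_ratio * seq_length)
    # one shared mask: first `num_masks` positions masked, rest visible
    mask = torch.cat([torch.ones(num_masks), torch.zeros(seq_length - num_masks)])
    # repeat the same mask for every example so each video has equally many masked patches
    return mask.expand(batch_size, -1).bool()

bool_masked_pos = make_bool_masked_pos(batch_size=2, seq_length=1568, mask_ratio=0.9)
assert bool_masked_pos.shape == (2, 1568)
assert int(bool_masked_pos[0].sum()) == int(0.9 * 1568)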
| 203 | 0 |
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir ):
    """simple docstring"""
    locka = FileLock(str(tmpdir / '''foo.lock''' ) )
    lockb = FileLock(str(tmpdir / '''foo.lock''' ) )
    timeout = 0.01
    with locka.acquire():
        with pytest.raises(Timeout ):
            _start = time.time()
            lockb.acquire(timeout )
        assert time.time() - _start > timeout
def test_long_path(tmpdir ):
    """simple docstring"""
    filename = '''a''' * 1_000 + '''.lock'''
    locka = FileLock(str(tmpdir / filename ) )
    assert locka._lock_file.endswith('''.lock''' )
    assert not locka._lock_file.endswith(filename )
    assert len(os.path.basename(locka._lock_file ) ) <= 255
    lockb = FileLock(tmpdir / filename )
    with locka.acquire():
        with pytest.raises(Timeout ):
            lockb.acquire(0 )
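# --- Illustrative sketch (separate from the tests above): typical FileLock
# timeout handling in application code, assuming only the `filelock` package.
# The helper name and one-second timeout are hypothetical choices.
from filelock import FileLock, Timeout

def run_exclusively(path, work):
    lock = FileLock(path + '.lock')
    try:
        with lock.acquire(timeout=1):  # give up after 1 second instead of blocking forever
            return work()
    except Timeout:
        # another process holds the lock; the caller decides whether to retry or skip
        return None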
| 14 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"""microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""",
}
class GitVisionConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = '''git_vision_model'''
    def __init__( self , hidden_size=7_68 , intermediate_size=30_72 , num_hidden_layers=12 , num_attention_heads=12 , num_channels=3 , image_size=2_24 , patch_size=16 , hidden_act="quick_gelu" , layer_norm_eps=1e-5 , attention_dropout=0.0 , initializer_range=0.02 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path: Union[str, os.PathLike] , **kwargs ):
        '''simple docstring'''
        cls._set_token_in_kwargs(kwargs )
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("""model_type""" ) == "git":
            config_dict = config_dict["""vision_config"""]
        if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class GitConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = '''git'''
    def __init__( self , vision_config=None , vocab_size=3_05_22 , hidden_size=7_68 , num_hidden_layers=6 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=10_24 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , tie_word_embeddings=False , bos_token_id=1_01 , eos_token_id=1_02 , num_image_with_embedding=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , pad_token_id=pad_token_id , **kwargs )
        if vision_config is None:
            vision_config = {}
            logger.info("""vision_config is None. initializing the GitVisionConfig with default values.""" )
        self.vision_config = GitVisionConfig(**vision_config )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
    def to_dict( self ):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        output["""vision_config"""] = self.vision_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
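# --- Illustrative sketch (separate from the classes above): the nested-config
# pattern used here, reduced to plain Python. A composite config serializes its
# sub-config back to a dict so the whole object round-trips through JSON.
# Class and field names below are hypothetical.
import copy
import json

class VisionCfg:
    def __init__(self, hidden_size=768):
        self.hidden_size = hidden_size

class CompositeCfg:
    def __init__(self, vision_config=None):
        # accept a plain dict (or nothing), then normalize to a sub-config object
        self.vision_config = VisionCfg(**(vision_config or {}))

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output['vision_config'] = self.vision_config.__dict__  # flatten the sub-config
        return output

cfg = CompositeCfg({'hidden_size': 1024})
assert json.loads(json.dumps(cfg.to_dict()))['vision_config']['hidden_size'] == 1024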
| 323 | 0 |
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp( self ):
        super().setUp()
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', '你', '好', '是', '谁', 'a', 'b', 'c', 'd']
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens ):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        self.word_shape_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_shape_file'] )
        self.word_pronunciation_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_pronunciation_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
        with open(self.word_shape_file , 'w' , encoding='utf-8' ) as word_shape_writer:
            json.dump(word_shape , word_shape_writer , ensure_ascii=False )
        with open(self.word_pronunciation_file , 'w' , encoding='utf-8' ) as word_pronunciation_writer:
            json.dump(word_pronunciation , word_pronunciation_writer , ensure_ascii=False )
    def test_full_tokenizer( self ):
        tokenizer = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
        tokens = tokenizer.tokenize('你好[SEP]你是谁' )
        self.assertListEqual(tokens , ['你', '好', '[SEP]', '你', '是', '谁'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [5, 6, 2, 5, 7, 8] )
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens ) , [5, 6, 2, 5, 7, 8] )
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens ) , [5, 6, 2, 5, 7, 8] )
    def test_chinese( self ):
        tokenizer = RoCBertBasicTokenizer()
        self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
    def test_basic_tokenizer_lower( self ):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True )
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
    def test_basic_tokenizer_lower_strip_accents_false( self ):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True , strip_accents=False )
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
    def test_basic_tokenizer_lower_strip_accents_true( self ):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True , strip_accents=True )
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
    def test_basic_tokenizer_lower_strip_accents_default( self ):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True )
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
    def test_basic_tokenizer_no_lower( self ):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False )
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
    def test_basic_tokenizer_no_lower_strip_accents_false( self ):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False , strip_accents=False )
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
    def test_basic_tokenizer_no_lower_strip_accents_true( self ):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False , strip_accents=True )
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
    def test_basic_tokenizer_respects_never_split_tokens( self ):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False , never_split=['[UNK]'] )
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
    def test_wordpiece_tokenizer( self ):
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
        vocab = {}
        for i, token in enumerate(vocab_tokens ):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab , unk_token='[UNK]' )
        self.assertListEqual(tokenizer.tokenize('' ) , [] )
        self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
        self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
    def test_is_whitespace( self ):
        self.assertTrue(_is_whitespace(' ' ) )
        self.assertTrue(_is_whitespace('\t' ) )
        self.assertTrue(_is_whitespace('\r' ) )
        self.assertTrue(_is_whitespace('\n' ) )
        self.assertTrue(_is_whitespace('\u00A0' ) )
        self.assertFalse(_is_whitespace('A' ) )
        self.assertFalse(_is_whitespace('-' ) )
    def test_is_control( self ):
        self.assertTrue(_is_control('\u0005' ) )
        self.assertFalse(_is_control('A' ) )
        self.assertFalse(_is_control(' ' ) )
        self.assertFalse(_is_control('\t' ) )
        self.assertFalse(_is_control('\r' ) )
    def test_is_punctuation( self ):
        self.assertTrue(_is_punctuation('-' ) )
        self.assertTrue(_is_punctuation('$' ) )
        self.assertTrue(_is_punctuation('`' ) )
        self.assertTrue(_is_punctuation('.' ) )
        self.assertFalse(_is_punctuation('A' ) )
        self.assertFalse(_is_punctuation(' ' ) )
    def test_clean_text( self ):
        tokenizer = self.get_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
    def test_offsets_with_special_characters( self ):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                sentence = f'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
                tokens = tokenizer_r.encode_plus(
                    sentence , return_attention_mask=False , return_token_type_ids=False , return_offsets_mapping=True , add_special_tokens=True , )
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r , 'do_lower_case' ) else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), 'Allen'),
((2_1, 2_3), '##NL'),
((2_3, 2_4), '##P'),
((2_5, 3_3), 'sentence'),
((3_3, 3_4), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), 'allen'),
((2_1, 2_3), '##nl'),
((2_3, 2_4), '##p'),
((2_5, 3_3), 'sentence'),
((3_3, 3_4), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
    def test_change_tokenize_chinese_chars( self ):
        list_of_commun_chinese_char = ['的', '人', '有']
        text_with_chinese_char = ''.join(list_of_commun_chinese_char )
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                kwargs['tokenize_chinese_chars'] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char , add_special_tokens=False )
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char , add_special_tokens=False )
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r )
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p )
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p , list_of_commun_chinese_char )
                self.assertListEqual(tokens_without_spe_char_r , list_of_commun_chinese_char )
                kwargs['tokenize_chinese_chars'] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char , add_special_tokens=False )
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char , add_special_tokens=False )
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r )
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p )
                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f'''##{token}''' if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char )
                ]
                self.assertListEqual(tokens_without_spe_char_p , expected_tokens )
                self.assertListEqual(tokens_without_spe_char_r , expected_tokens )
@slow
    def test_sequence_builders( self ):
        tokenizer = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
        text = tokenizer.encode('你好' , add_special_tokens=False )
        text_a = tokenizer.encode('你是谁' , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_a + [2]
    def test_prepare_for_model( self ):
        tokenizers = self.get_tokenizers(do_lower_case=False )
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                string_sequence = '你好,你是谁'
                tokens = tokenizer.tokenize(string_sequence )
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens )
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens )
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens )
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids , tokens_shape_ids , tokens_proun_ids , add_special_tokens=True )
                input_dict = tokenizer.encode_plus(string_sequence , add_special_tokens=True )
                self.assertEqual(input_dict , prepared_input_dict )
| 292 |
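# --- Illustrative sketch (expanding on the WordPiece tokenizer exercised in the
# preceding test file): the greedy longest-match-first algorithm that turns
# "unwanted" into un / ##want / ##ed. This is a simplified stand-in, not the
# RoCBertWordpieceTokenizer implementation itself.
def wordpiece_tokenize(word, vocab, unk_token='[UNK]'):
    tokens, start = [], 0
    while start < len(word):
        end = len(word)
        cur = None
        while start < end:  # shrink the window until a vocab entry matches
            piece = word[start:end]
            if start > 0:
                piece = '##' + piece  # continuation pieces carry the '##' prefix
            if piece in vocab:
                cur = piece
                break
            end -= 1
        if cur is None:
            return [unk_token]  # no prefix matched: the whole word becomes UNK
        tokens.append(cur)
        start = end
    return tokens

vocab = {'un', '##want', '##ed', 'runn', '##ing'}
assert wordpiece_tokenize('unwanted', vocab) == ['un', '##want', '##ed']
assert wordpiece_tokenize('unwantedX', vocab) == ['[UNK]']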
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class _lowerCAmelCase( unittest.TestCase ):
"""simple docstring"""
def _a ( self ):
# A mock response for an HTTP head request to emulate server down
UpperCamelCase_: Any = mock.Mock()
UpperCamelCase_: Dict = 5_0_0
UpperCamelCase_: Any = {}
UpperCamelCase_: Tuple = HTTPError
UpperCamelCase_: List[str] = {}
# Download this model to make sure it's in the cache.
UpperCamelCase_: int = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=_lowerCamelCase ) as mock_head:
UpperCamelCase_: Optional[int] = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def _a ( self ):
# A mock response for an HTTP head request to emulate server down
UpperCamelCase_: Union[str, Any] = mock.Mock()
UpperCamelCase_: Union[str, Any] = 5_0_0
UpperCamelCase_: str = {}
UpperCamelCase_: List[str] = HTTPError
UpperCamelCase_: Optional[int] = {}
# Download this model to make sure it's in the cache.
UpperCamelCase_: List[str] = GPTaTokenizerFast.from_pretrained('gpt2' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=_lowerCamelCase ) as mock_head:
UpperCamelCase_: str = GPTaTokenizerFast.from_pretrained('gpt2' )
# This check we did call the fake head request
mock_head.assert_called()
def _a ( self ):
# This test is for deprecated behavior and can be removed in v5
try:
UpperCamelCase_: Optional[int] = tempfile.mktemp()
with open(_lowerCamelCase , 'wb' ) as f:
http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' , _lowerCamelCase )
UpperCamelCase_: Tuple = AlbertTokenizer.from_pretrained(_lowerCamelCase )
finally:
os.remove(_lowerCamelCase )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile('tokenizer.json' ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open('tokenizer.json' , 'wb' ) as f:
http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json' , _lowerCamelCase )
UpperCamelCase_: List[str] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 1_0_0_0 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove('tokenizer.json' )
def _a ( self ):
# This test is for deprecated behavior and can be removed in v5
UpperCamelCase_: Any = AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' )
@is_staging_test
class _lowerCAmelCase( unittest.TestCase ):
"""simple docstring"""
    vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
@classmethod
    def setUpClass( cls ):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
@classmethod
def _a ( cls ):
try:
delete_repo(token=cls._token , repo_id='test-tokenizer' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-tokenizer-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-tokenizer' )
except HTTPError:
pass
def _a ( self ):
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase_: List[Any] = os.path.join(_lowerCamelCase , 'vocab.txt' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
UpperCamelCase_: int = BertTokenizer(_lowerCamelCase )
tokenizer.push_to_hub('test-tokenizer' , use_auth_token=self._token )
UpperCamelCase_: Union[str, Any] = BertTokenizer.from_pretrained(f'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='test-tokenizer' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_lowerCamelCase , repo_id='test-tokenizer' , push_to_hub=_lowerCamelCase , use_auth_token=self._token )
UpperCamelCase_: List[str] = BertTokenizer.from_pretrained(f'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
def _a ( self ):
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase_: Optional[Any] = os.path.join(_lowerCamelCase , 'vocab.txt' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
UpperCamelCase_: Union[str, Any] = BertTokenizer(_lowerCamelCase )
tokenizer.push_to_hub('valid_org/test-tokenizer-org' , use_auth_token=self._token )
UpperCamelCase_: Dict = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-tokenizer-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
_lowerCamelCase , repo_id='valid_org/test-tokenizer-org' , push_to_hub=_lowerCamelCase , use_auth_token=self._token )
UpperCamelCase_: Optional[Any] = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
@require_tokenizers
def _a ( self ):
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase_: Optional[Any] = os.path.join(_lowerCamelCase , 'vocab.txt' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
UpperCamelCase_: Optional[int] = CustomTokenizer(_lowerCamelCase )
# No fast custom tokenizer
tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token )
UpperCamelCase_: str = AutoTokenizer.from_pretrained(f'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=_lowerCamelCase )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer' )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase_: Optional[Any] = os.path.join(_lowerCamelCase , 'vocab.txt' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
UpperCamelCase_: Dict = BertTokenizerFast.from_pretrained(_lowerCamelCase )
bert_tokenizer.save_pretrained(_lowerCamelCase )
UpperCamelCase_: List[str] = CustomTokenizerFast.from_pretrained(_lowerCamelCase )
tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token )
UpperCamelCase_: Tuple = AutoTokenizer.from_pretrained(f'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=_lowerCamelCase )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizerFast' )
UpperCamelCase_: int = AutoTokenizer.from_pretrained(
f'''{USER}/test-dynamic-tokenizer''' , use_fast=_lowerCamelCase , trust_remote_code=_lowerCamelCase )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer' )
class _lowerCAmelCase( unittest.TestCase ):
"""simple docstring"""
def _a ( self ):
UpperCamelCase_: Dict = Trie()
trie.add('Hello 友達' )
self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}} )
trie.add('Hello' )
trie.data
self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}} )
def _a ( self ):
UpperCamelCase_: Optional[int] = Trie()
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) , ['[CLS] This is a extra_id_100'] )
trie.add('[CLS]' )
trie.add('extra_id_1' )
trie.add('extra_id_100' )
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) , ['[CLS]', ' This is a ', 'extra_id_100'] )
def _a ( self ):
UpperCamelCase_: int = Trie()
trie.add('A' )
self.assertEqual(trie.split('ABC' ) , ['A', 'BC'] )
self.assertEqual(trie.split('BCA' ) , ['BC', 'A'] )
def _a ( self ):
UpperCamelCase_: str = Trie()
trie.add('TOKEN]' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) , ['This is something ', '[SPECIAL_TOKEN]'] )
def _a ( self ):
UpperCamelCase_: Union[str, Any] = Trie()
trie.add('A' )
trie.add('P' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) , ['This is something ', '[SPECIAL_TOKEN]'] )
def _a ( self ):
UpperCamelCase_: List[str] = Trie()
trie.add('AB' )
trie.add('B' )
trie.add('C' )
self.assertEqual(trie.split('ABC' ) , ['AB', 'C'] )
def _a ( self ):
UpperCamelCase_: List[str] = Trie()
trie.add('ABC' )
trie.add('B' )
trie.add('CD' )
self.assertEqual(trie.split('ABCD' ) , ['ABC', 'D'] )
def _a ( self ):
# Even if the offsets are wrong, we necessarily output correct string
# parts.
UpperCamelCase_: Union[str, Any] = Trie()
        parts = trie.cut_text('ABC' , [0, 0, 2, 1, 2, 3] )
        self.assertEqual(parts , ['AB', 'C'] )
| 292 | 1 |
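# --- Illustrative sketch (expanding on the Trie tests in the preceding file):
# a minimal trie with the add/split behavior those tests exercise. This is a
# simplified stand-in for transformers.tokenization_utils.Trie, not its
# actual implementation.
class MiniTrie:
    def __init__(self):
        self.data = {}

    def add(self, word):
        node = self.data
        for ch in word:
            node = node.setdefault(ch, {})
        node[''] = 1  # empty-string key marks a complete word

    def split(self, text):
        # greedy scan: cut around the longest added word starting at each position
        parts, i, last = [], 0, 0
        while i < len(text):
            node, j, match_end = self.data, i, None
            while j < len(text) and text[j] in node:
                node = node[text[j]]
                j += 1
                if '' in node:
                    match_end = j  # keep extending to prefer the longest match
            if match_end is None:
                i += 1
            else:
                if last < i:
                    parts.append(text[last:i])
                parts.append(text[i:match_end])
                i = match_end
                last = i
        if last < len(text):
            parts.append(text[last:])
        return parts or [text]

t = MiniTrie()
t.add('[CLS]')
t.add('extra_id_100')
assert t.split('[CLS] This is a extra_id_100') == ['[CLS]', ' This is a ', 'extra_id_100']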
"""simple docstring"""
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('dataset_size' , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize('input_in_memory_max_size' , ['default', 0, 100 * 2**20, 900 * 2**20] )
def test_is_small_dataset(dataset_size , input_in_memory_max_size , monkeypatch ):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config , 'IN_MEMORY_MAX_SIZE' , input_in_memory_max_size )
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size )
    assert result == expected
| 74 |
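# --- Illustrative sketch (separate from the test above): the predicate being
# tested, written out as a pure function. `dataset_size` is in bytes and a max
# size of 0 means "in-memory mode disabled" — both assumptions taken from the
# expectations encoded in the test itself.
def is_small_dataset_ref(dataset_size, in_memory_max_size):
    if dataset_size and in_memory_max_size:
        return dataset_size < in_memory_max_size
    return False

assert is_small_dataset_ref(400 * 2**20, 600 * 2**20) is True
assert is_small_dataset_ref(600 * 2**20, 0) is False
assert is_small_dataset_ref(None, 600 * 2**20) is False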
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'''configuration_deit''': ['''DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DeiTConfig''', '''DeiTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_deit'''] = ['''DeiTFeatureExtractor''']
    _import_structure['''image_processing_deit'''] = ['''DeiTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_deit'''] = [
'''DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DeiTForImageClassification''',
'''DeiTForImageClassificationWithTeacher''',
'''DeiTForMaskedImageModeling''',
'''DeiTModel''',
'''DeiTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_deit'''] = [
'''TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDeiTForImageClassification''',
'''TFDeiTForImageClassificationWithTeacher''',
'''TFDeiTForMaskedImageModeling''',
'''TFDeiTModel''',
'''TFDeiTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 74 | 1 |
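# --- Illustrative sketch (separate from the module above): the idea behind
# _LazyModule, reduced to PEP 562 module-level __getattr__. The submodule and
# attribute names below are hypothetical, and this is not the transformers
# implementation — just the same lazy-import mechanism in miniature.
import importlib

_import_structure = {'configuration_deit': ['DeiTConfig']}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    # resolve the attribute to its submodule only on first access
    if name in _attr_to_module:
        module = importlib.import_module('.' + _attr_to_module[name], __name__)
        return getattr(module, name)
    raise AttributeError(f'module {__name__!r} has no attribute {name!r}')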
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _UpperCAmelCase ( metaclass=DummyObject ):
    _backends = ["""flax"""]
def __init__( self,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
requires_backends(self,["""flax"""] )
@classmethod
def lowerCamelCase__ ( cls,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
requires_backends(cls,["""flax"""] )
@classmethod
def lowerCamelCase__ ( cls,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
requires_backends(cls,["""flax"""] )
class _UpperCAmelCase ( metaclass=DummyObject ):
    _backends = ["""flax"""]
def __init__( self,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
requires_backends(self,["""flax"""] )
@classmethod
def lowerCamelCase__ ( cls,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
requires_backends(cls,["""flax"""] )
@classmethod
def lowerCamelCase__ ( cls,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
requires_backends(cls,["""flax"""] )
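# --- Illustrative sketch (separate from the generated classes above): the idea
# behind a DummyObject-style metaclass, reduced to a minimal stand-in. Importing
# the placeholder stays cheap; only *using* it raises, pointing at the missing
# backend. Names below are hypothetical, not the transformers implementation.
class _RequiresFlax(type):
    def __call__(cls, *args, **kwargs):
        raise ImportError(f"{cls.__name__} requires the 'flax' backend to be installed." )

class FlaxOnlyThing(metaclass=_RequiresFlax ):
    pass

# FlaxOnlyThing()  # would raise ImportError at instantiation time, not at import time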
class _UpperCAmelCase ( metaclass=DummyObject ):
    _backends = ["""flax"""]
def __init__( self,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
requires_backends(self,["""flax"""] )
@classmethod
def lowerCamelCase__ ( cls,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
requires_backends(cls,["""flax"""] )
@classmethod
def lowerCamelCase__ ( cls,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
requires_backends(cls,["""flax"""] )
class _UpperCAmelCase ( metaclass=DummyObject ):
    _backends = ["""flax"""]
def __init__( self,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
requires_backends(self,["""flax"""] )
@classmethod
def lowerCamelCase__ ( cls,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
requires_backends(cls,["""flax"""] )
@classmethod
def lowerCamelCase__ ( cls,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
requires_backends(cls,["""flax"""] )
class _UpperCAmelCase ( metaclass=lowerCAmelCase_ ):
a : List[Any] =["""flax"""]
def __init__( self,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
requires_backends(self,["""flax"""] )
@classmethod
def lowerCamelCase__ ( cls,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
requires_backends(cls,["""flax"""] )
@classmethod
def lowerCamelCase__ ( cls,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
requires_backends(cls,["""flax"""] )
class _UpperCAmelCase ( metaclass=lowerCAmelCase_ ):
a : Tuple =["""flax"""]
def __init__( self,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
requires_backends(self,["""flax"""] )
@classmethod
def lowerCamelCase__ ( cls,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
requires_backends(cls,["""flax"""] )
@classmethod
def lowerCamelCase__ ( cls,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
requires_backends(cls,["""flax"""] )
class _UpperCAmelCase ( metaclass=lowerCAmelCase_ ):
a : Tuple =["""flax"""]
def __init__( self,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
requires_backends(self,["""flax"""] )
@classmethod
def lowerCamelCase__ ( cls,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
requires_backends(cls,["""flax"""] )
@classmethod
def lowerCamelCase__ ( cls,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
requires_backends(cls,["""flax"""] )
class _UpperCAmelCase ( metaclass=lowerCAmelCase_ ):
a : Dict =["""flax"""]
def __init__( self,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
requires_backends(self,["""flax"""] )
@classmethod
def lowerCamelCase__ ( cls,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
requires_backends(cls,["""flax"""] )
@classmethod
def lowerCamelCase__ ( cls,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
requires_backends(cls,["""flax"""] )
class _UpperCAmelCase ( metaclass=lowerCAmelCase_ ):
a : Optional[Any] =["""flax"""]
def __init__( self,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
requires_backends(self,["""flax"""] )
@classmethod
def lowerCamelCase__ ( cls,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
requires_backends(cls,["""flax"""] )
@classmethod
def lowerCamelCase__ ( cls,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
requires_backends(cls,["""flax"""] )
class _UpperCAmelCase ( metaclass=lowerCAmelCase_ ):
a : Union[str, Any] =["""flax"""]
def __init__( self,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
requires_backends(self,["""flax"""] )
@classmethod
def lowerCamelCase__ ( cls,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
requires_backends(cls,["""flax"""] )
@classmethod
def lowerCamelCase__ ( cls,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
requires_backends(cls,["""flax"""] )
class _UpperCAmelCase ( metaclass=lowerCAmelCase_ ):
a : Union[str, Any] =["""flax"""]
def __init__( self,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
requires_backends(self,["""flax"""] )
@classmethod
def lowerCamelCase__ ( cls,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
requires_backends(cls,["""flax"""] )
@classmethod
def lowerCamelCase__ ( cls,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
requires_backends(cls,["""flax"""] )
class _UpperCAmelCase ( metaclass=lowerCAmelCase_ ):
a : List[Any] =["""flax"""]
def __init__( self,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
requires_backends(self,["""flax"""] )
@classmethod
def lowerCamelCase__ ( cls,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
requires_backends(cls,["""flax"""] )
@classmethod
def lowerCamelCase__ ( cls,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
requires_backends(cls,["""flax"""] )
class _UpperCAmelCase ( metaclass=lowerCAmelCase_ ):
a : List[str] =["""flax"""]
def __init__( self,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
requires_backends(self,["""flax"""] )
@classmethod
def lowerCamelCase__ ( cls,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
requires_backends(cls,["""flax"""] )
@classmethod
def lowerCamelCase__ ( cls,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
requires_backends(cls,["""flax"""] )
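# --- Editor's illustration (added; not part of the original module) -----------
# What these dummies buy you: with flax absent, *using* a class fails with a
# clear ImportError at call time rather than a NameError at import time.
def _editor_dummy_demo():
    try:
        _UpperCAmelCase()  # any of the dummy classes defined above
    except ImportError as err:
        return str(err)  # message explains that the flax backend is required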
| 46 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : int = logging.get_logger(__name__)
_a : List[str] = {
"""edbeeching/decision-transformer-gym-hopper-medium""": (
"""https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"""
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class _UpperCAmelCase ( PretrainedConfig ):
a : List[str] ="""decision_transformer"""
a : List[Any] =["""past_key_values"""]
a : Dict ={
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
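# --- Editor's illustration (added; values arbitrary) ---------------------------
def _editor_config_demo():
    # attribute_map lets generic code read `max_position_embeddings` while this
    # config stores the value as `n_positions`.
    demo = _UpperCAmelCase(state_dim=17, act_dim=6)
    assert demo.max_position_embeddings == demo.n_positions == 1024
    return demo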
| 46 | 1 |
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )

    def __post_init__(self):
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
            )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
    )
    validation_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
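# --- Editor's illustration (added; toy values) ----------------------------------
def _editor_chinese_ref_demo():
    # Shape of the data add_chinese_references produces: one list of positions of
    # "##"-subword pieces per example, later consumed by DataCollatorForWholeWordMask.
    return Dataset.from_dict(
        {"input_ids": [[101, 7, 8, 102], [101, 5, 102]], "chinese_ref": [[2], []]}
    )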
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", handlers=[logging.StreamHandler(sys.stdout )], )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""", lowerCamelCase )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            datasets["validation"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[:{data_args.validation_split_percentage}%]",
            )
            datasets["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[{data_args.validation_split_percentage}%:]",
            )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        if extension == "txt":
            extension = "text"
        datasets = load_dataset(extension, data_files=data_files)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )

    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedLM.from_config(config)

    model.resize_token_embeddings(len(tokenizer))
    # Preprocessing the datasets.
    # First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["train"].column_names
    else:
        column_names = datasets["validation"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]

    padding = "max_length" if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines
        examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length)

    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=[text_column_name],
        load_from_cache_file=not data_args.overwrite_cache,
    )

    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets["validation"] = add_chinese_references(
            tokenized_datasets["validation"], data_args.validation_ref_file
        )
    # If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False

    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"] if training_args.do_train else None,
        eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload

        output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
        if trainer.is_world_process_zero():
            with open(output_train_file, "w") as writer:
                logger.info("***** Train results *****")
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        results["perplexity"] = perplexity

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in sorted(results.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
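# --- Editor's note: an illustrative invocation (model name and paths are
# placeholders, not canonical values):
#
#   python run_mlm_wwm.py \
#       --model_name_or_path hfl/chinese-roberta-wwm-ext \
#       --train_file train.txt --train_ref_file ref.txt \
#       --do_train --output_dir ./wwm-out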
| 287 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class A__(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)

        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: float,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
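# --- Editor's illustration (added): the crop_pct arithmetic in resize() above.
# With the default crop_pct = 224 / 256 and a requested shortest_edge of 224,
# the image is first resized so its short side is int(224 / (224 / 256)) = 256,
# then center-cropped to 224x224 -- the classic "resize to 256, crop 224" recipe.
assert int(224 / (224 / 256)) == 256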
| 287 | 1 |
"""simple docstring"""
from math import factorial
UpperCAmelCase : Tuple = {str(d): factorial(d) for d in range(10)}
def lowerCamelCase ( _UpperCamelCase : int ) -> int:
'''simple docstring'''
return sum(DIGIT_FACTORIAL[d] for d in str(_UpperCamelCase ) )
def lowerCamelCase ( ) -> int:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = 7 * factorial(9 ) + 1
return sum(i for i in range(3 , _UpperCamelCase ) if sum_of_digit_factorial(_UpperCamelCase ) == i )
if __name__ == "__main__":
print(F"{solution() = }")
| 353 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 320 | 0 |
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp):
    """Checks whether cp is the codepoint of a CJK character."""
    # This defines a "chinese character" as anything in the CJK Unicode block:
    #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    #
    # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
    # despite its name. The modern Korean Hangul alphabet is a different block,
    # as is Japanese Hiragana and Katakana. Those alphabets are used to write
    # space-separated words, so they are not treated specially and handled
    # like the all of the other languages.
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)
        or (cp >= 0x20000 and cp <= 0x2A6DF)
        or (cp >= 0x2A700 and cp <= 0x2B73F)
        or (cp >= 0x2B740 and cp <= 0x2B81F)
        or (cp >= 0x2B820 and cp <= 0x2CEAF)
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)
    ):
        return True

    return False
def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        char = ord(char)
        if not _is_chinese_char(char):
            return 0
    return 1
def get_chinese_word(tokens: List[str]):
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            length = min(end - start, max_word_len)
            for i in range(length, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
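# --- Editor's illustration (added; toy tokens) ----------------------------------
# If LTP segmented the whole word "朋友" while BERT split it into ["朋", "友"],
# add_sub_symbol re-marks the trailing piece so whole-word masking can treat
# the pair as a single unit.
assert add_sub_symbol(["朋", "友", "!"], {"朋友"}) == ["朋", "##友", "!"]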
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
'''--file_name''',
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''', type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path'''
)
parser.add_argument('''--bert''', type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''')
parser.add_argument('''--save_path''', type=str, default='''./resources/ref.txt''', help='''path to save res''')
    args = parser.parse_args()
main(args)
| 157 | def check_bouncy(n: int) -> bool:
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1

    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1
num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F'{solution(99)}')
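    # Editor's check (added, illustrative): 134468 is increasing, 66420 is
    # decreasing, and 155349 is bouncy -- matching the definition above.
    assert not check_bouncy(134468) and not check_bouncy(66420) and check_bouncy(155349)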
| 157 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class lowerCamelCase_(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample=PIL.Image.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], resample=PIL.Image.BICUBIC, data_format=None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )
    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format=None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean, std, data_format=None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 73 |
"""simple docstring"""
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(encoder_config, decoder_config):
    rename_keys = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""encoder.deit.blocks.{i}.norm1.weight""", F"""encoder.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""encoder.deit.blocks.{i}.norm1.bias""", F"""encoder.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.attn.proj.weight""", F"""encoder.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.attn.proj.bias""", F"""encoder.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.norm2.weight""", F"""encoder.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""encoder.deit.blocks.{i}.norm2.bias""", F"""encoder.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.mlp.fc1.weight""", F"""encoder.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.mlp.fc1.bias""", F"""encoder.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.mlp.fc2.weight""", F"""encoder.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""encoder.deit.blocks.{i}.mlp.fc2.bias""", F"""encoder.encoder.layer.{i}.output.dense.bias""") )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
('''encoder.deit.cls_token''', '''encoder.embeddings.cls_token'''),
('''encoder.deit.pos_embed''', '''encoder.embeddings.position_embeddings'''),
('''encoder.deit.patch_embed.proj.weight''', '''encoder.embeddings.patch_embeddings.projection.weight'''),
('''encoder.deit.patch_embed.proj.bias''', '''encoder.embeddings.patch_embeddings.projection.bias'''),
('''encoder.deit.norm.weight''', '''encoder.layernorm.weight'''),
('''encoder.deit.norm.bias''', '''encoder.layernorm.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, encoder_config):
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight")

        state_dict[f"encoder.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
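# --- Editor's illustration (added; toy shapes) ----------------------------------
def _editor_qkv_split_demo(hidden_size: int = 4):
    # The slicing in read_in_q_k_v assumes the checkpoint stores one fused
    # (3 * hidden, hidden) qkv matrix with query/key/value stacked along dim 0;
    # slicing rows recovers the three projections.
    fused = torch.zeros(3 * hidden_size, hidden_size)
    q = fused[:hidden_size]
    k = fused[hidden_size : 2 * hidden_size]
    v = fused[-hidden_size:]
    assert q.shape == k.shape == v.shape == (hidden_size, hidden_size)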
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img(checkpoint_url):
    if "handwritten" in checkpoint_url:
        url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
    im = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    encoder_config = ViTConfig(image_size=384, qkv_bias=False)
    decoder_config = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
elif "large" in checkpoint_url:
# use ViT-large encoder
        encoder_config.hidden_size = 1024
        encoder_config.intermediate_size = 4096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1024
else:
raise ValueError('''Should either find \'base\' or \'large\' in checkpoint URL''' )
# the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = "relu"
        decoder_config.max_position_embeddings = 1024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False
# load HuggingFace model
    encoder = ViTModel(encoder_config, add_pooling_layer=False)
    decoder = TrOCRForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
model.eval()
# load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", check_hash=True)["model"]

    rename_keys = create_rename_keys(encoder_config, decoder_config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, encoder_config)
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("decoder") and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val
# load state dict
    model.load_state_dict(state_dict)
# Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size)
    tokenizer = RobertaTokenizer.from_pretrained("roberta-large")
    processor = TrOCRProcessor(image_processor, tokenizer)

    pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors="pt").pixel_values

    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]])
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
    logits = outputs.logits

    expected_shape = torch.Size([1, 1, 50265])
if "trocr-base-handwritten" in checkpoint_url:
_lowerCamelCase = torch.tensor(
[-1.4_5_0_2, -4.6_6_8_3, -0.5_3_4_7, -2.9_2_9_1, 9.1_4_3_5, -3.0_5_7_1, 8.9_7_6_4, 1.7_5_6_0, 8.7_3_5_8, -1.5_3_1_1] )
elif "trocr-large-handwritten" in checkpoint_url:
_lowerCamelCase = torch.tensor(
[-2.6_4_3_7, -1.3_1_2_9, -2.2_5_9_6, -5.3_4_5_5, 6.3_5_3_9, 1.7_6_0_4, 5.4_9_9_1, 1.4_7_0_2, 5.6_1_1_3, 2.0_1_7_0] )
elif "trocr-base-printed" in checkpoint_url:
_lowerCamelCase = torch.tensor(
[-5.6_8_1_6, -5.8_3_8_8, 1.1_3_9_8, -6.9_0_3_4, 6.8_5_0_5, -2.4_3_9_3, 1.2_2_8_4, -1.0_2_3_2, -1.9_6_6_1, -3.9_2_1_0] )
elif "trocr-large-printed" in checkpoint_url:
_lowerCamelCase = torch.tensor(
[-6.0_1_6_2, -7.0_9_5_9, 4.4_1_5_5, -5.1_0_6_3, 7.0_4_6_8, -3.1_6_3_1, 2.6_4_6_6, -0.3_0_8_1, -0.8_1_0_6, -1.7_5_3_5] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-3), "First elements of logits not as expected"
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving processor to {pytorch_dump_folder_path}")
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
    args = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 73 | 1 |
def least_divisible_repunit(divisor: int) -> int:
    """Return the length of the shortest repunit divisible by `divisor` (0 if none exists)."""
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1_000_000) -> int:
    """Return the least odd divisor, not divisible by 5, whose repunit length exceeds `limit`."""
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(F'{solution() = }')
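    # Editor's check (added, illustrative): 1/7 repeats with period 6, and the
    # six-digit repunit 111111 = 7 * 15873 is the first repunit divisible by 7.
    assert least_divisible_repunit(7) == 6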
| 31 |
"""simple docstring"""
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)

DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
def generate_summaries_or_translations(
    examples: List[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
) -> Dict:
    """Save model.generate results to <out_file>, and return how long it took."""
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}
def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def run_generate(verbose=True):
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)

    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")

    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")

    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )

    if args.reference_path is None:
        return {}

    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)

    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info

    if verbose:
        print(scores)

    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))

    return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
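    # Usage for summarization (an assumed mirror of the MT example above; --task defaults to summarization, so the flag can be dropped):
    # python run_eval.py facebook/bart-large-cnn $DATA_DIR/test.source $save_dir/test_generations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_rouge.json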
run_generate(verbose=True)
| 301 | 0 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    parser = ArgumentParser('''Transformers CLI tool''' , usage='''transformers-cli <command> [<args>]''' )
    commands_parser = parser.add_subparsers(help='''transformers-cli command helpers''' )
    # Register commands
    ConvertCommand.register_subcommand(commands_parser )
    DownloadCommand.register_subcommand(commands_parser )
    EnvironmentCommand.register_subcommand(commands_parser )
    RunCommand.register_subcommand(commands_parser )
    ServeCommand.register_subcommand(commands_parser )
    UserCommands.register_subcommand(commands_parser )
    AddNewModelCommand.register_subcommand(commands_parser )
    AddNewModelLikeCommand.register_subcommand(commands_parser )
    LfsCommands.register_subcommand(commands_parser )
    PTtoTFCommand.register_subcommand(commands_parser )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , '''func''' ):
        parser.print_help()
        exit(1 )
    # Run
    service = args.func(args )
    service.run()
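# Example invocations (assumed; this module backs the `transformers-cli` console script):
#   transformers-cli env            # prints environment info for bug reports
#   transformers-cli download gpt2  # pre-fetches a checkpoint into the local cache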
if __name__ == "__main__":
main()
| 368 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_mobilevit': ['MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MobileViTConfig', 'MobileViTOnnxConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_mobilevit'] = ['MobileViTFeatureExtractor']
    _import_structure['image_processing_mobilevit'] = ['MobileViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mobilevit'] = [
'MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileViTForImageClassification',
'MobileViTForSemanticSegmentation',
'MobileViTModel',
'MobileViTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_mobilevit'] = [
'TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileViTForImageClassification',
'TFMobileViTForSemanticSegmentation',
'TFMobileViTModel',
'TFMobileViTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 33 | 0 |
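A minimal consumer-side sketch (an assumed example, not part of the row above): the _LazyModule registered in sys.modules defers the heavy torch/TF imports until an attribute is first touched.

# Hypothetical usage: importing the config materializes only configuration_mobilevit;
# modeling_tf_mobilevit stays unimported until a TF class is actually requested.
from transformers.models.mobilevit import MobileViTConfig

config = MobileViTConfig()  # no torch or tensorflow import has happened yet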
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_configure(config ):
'''simple docstring'''
config.addinivalue_line(
'''markers''', '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' )
config.addinivalue_line(
'''markers''', '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' )
config.addinivalue_line('''markers''', '''is_pipeline_test: mark test to run only when pipelines are tested''' )
config.addinivalue_line('''markers''', '''is_staging_test: mark test to run only in the staging environment''' )
config.addinivalue_line('''markers''', '''accelerate_tests: mark test that require accelerate''' )
config.addinivalue_line('''markers''', '''tool_tests: mark the tool tests that are run on their specific schedule''' )
def pytest_addoption(parser ):
    '''simple docstring'''
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser )
def pytest_terminal_summary(terminalreporter ):
    '''simple docstring'''
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption('''--make-reports''' )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports )
def pytest_sessionfinish(session, exitstatus ):
    '''simple docstring'''
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag('IGNORE_RESULT')
OutputChecker = doctest.OutputChecker
class CustomOutputChecker(OutputChecker ):
    def check_output(self , want , got , optionflags ):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self , want , got , optionflags )
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
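# A small sketch (assumed example, not part of the original conftest) of what the
# custom flag enables: a doctest marked with it passes even when the recorded
# output no longer matches, because CustomOutputChecker short-circuits the check.
#
#     >>> datetime.datetime.now()  # doctest: +IGNORE_RESULT
#     datetime.datetime(2021, 1, 1, 0, 0)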
| 56 |
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
lowerCAmelCase = logging.get_logger(__name__)
def load_flax_checkpoint_in_pytorch_model( pt_model , model_file ):
    """simple docstring"""
    try:
        with open(model_file , '''rb''' ) as flax_state_f:
            # target=None restores the raw state dict (as in the upstream helper)
            flax_state = from_bytes(None , flax_state_f.read() )
    except UnpicklingError as e:
        try:
            with open(model_file ) as f:
                if f.read().startswith('''version''' ):
                    raise OSError(
                        '''You seem to have cloned a repository without having git-lfs installed. Please'''
                        ''' install git-lfs and run `git lfs install` followed by `git lfs pull` in the'''
                        ''' folder you cloned.''' )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f'Unable to convert {model_file} to Flax deserializable object. ' )
    return load_flax_weights_in_pytorch_model(pt_model , flax_state )
def load_flax_weights_in_pytorch_model( pt_model , flax_state ):
    """simple docstring"""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            '''Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see'''
            ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
            ''' instructions.''' )
        raise
    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloat16 , flax_state ) ).values()
    if any(is_type_bf16 ):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            '''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '''
            '''before loading those in PyTorch model.''' )
        flax_state = jax.tree_util.tree_map(
            lambda params : params.astype(np.float32 ) if params.dtype == jnp.bfloat16 else params , flax_state )
    pt_model.base_model_prefix = ''''''  # assumed from the upstream helper; the assignment target was lost in the source
    flax_state_dict = flatten_dict(flax_state , sep='''.''' )
    pt_model_dict = pt_model.state_dict()
    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys() )
    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split('''.''' )
        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ['''weight''']
            flax_tensor = jnp.transpose(flax_tensor , (3, 2, 0, 1) )
        elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ['''weight''']
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ['''weight''']
        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array ):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace('''_0''' , '''.0''' )
                    .replace('''_1''' , '''.1''' )
                    .replace('''_2''' , '''.2''' )
                    .replace('''_3''' , '''.3''' )
                    .replace('''_4''' , '''.4''' )
                    .replace('''_5''' , '''.5''' )
                    .replace('''_6''' , '''.6''' )
                    .replace('''_7''' , '''.7''' )
                    .replace('''_8''' , '''.8''' )
                    .replace('''_9''' , '''.9''' )
                )
        flax_key = '''.'''.join(flax_key_tuple_array )
        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
                    f'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor ) if not isinstance(flax_tensor , np.ndarray ) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor )
                # remove from missing keys
                missing_keys.remove(flax_key )
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key )
    pt_model.load_state_dict(pt_model_dict )
    # re-transform missing_keys to list
    missing_keys = list(missing_keys )
    if len(unexpected_keys ) > 0:
        logger.warning(
            '''Some weights of the Flax model were not used when initializing the PyTorch model'''
            f' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
            f' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
            ''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'''
            f' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
            ''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'''
            ''' FlaxBertForSequenceClassification model).''' )
    if len(missing_keys ) > 0:
        logger.warning(
            f'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
            f' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
            ''' use it for predictions and inference.''' )
    return pt_model
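# Hypothetical usage sketch of the converters above (the checkpoint path is assumed):
# deserialize a Flax ``.msgpack`` state and copy its tensors -- transposing conv and
# dense kernels on the way -- into a freshly initialized PyTorch model.
#
#     pt_model = load_flax_checkpoint_in_pytorch_model(pt_model, "path/to/flax_model.msgpack")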
| 110 | 0 |
'''simple docstring'''
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class A ( SchedulerCommonTest ):
    scheduler_classes = (DDPMParallelScheduler,)
    def get_scheduler_config( self : int , **lowerCAmelCase_ : Any ) -> Optional[Any]:
"""simple docstring"""
        config = {
'''num_train_timesteps''': 10_00,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**lowerCAmelCase_ )
return config
def __lowerCAmelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
for timesteps in [1, 5, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase_ )
def __lowerCAmelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=lowerCAmelCase_ , beta_end=lowerCAmelCase_ )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowerCAmelCase_ )
def __lowerCAmelCase ( self : int ) -> Optional[int]:
"""simple docstring"""
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowerCAmelCase_ )
def __lowerCAmelCase ( self : str ) -> str:
"""simple docstring"""
self.check_over_configs(thresholding=lowerCAmelCase_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=lowerCAmelCase_ , prediction_type=lowerCAmelCase_ , sample_max_value=lowerCAmelCase_ , )
def __lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
for t in [0, 5_00, 9_99]:
self.check_over_forward(time_step=lowerCAmelCase_ )
def __lowerCAmelCase ( self : int ) -> Any:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**lowerCAmelCase_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.0_2 ) ) < 1e-5
def __lowerCAmelCase ( self : Dict ) -> Dict:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**lowerCAmelCase_ )
_a = len(lowerCAmelCase_ )
_a = self.dummy_model()
_a = self.dummy_sample_deter
_a = self.dummy_sample_deter + 0.1
_a = self.dummy_sample_deter - 0.1
_a = samplea.shape[0]
_a = torch.stack([samplea, samplea, samplea] , dim=0 )
_a = torch.arange(lowerCAmelCase_ )[0:3, None].repeat(1 , lowerCAmelCase_ )
_a = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
_a = scheduler.batch_step_no_noise(lowerCAmelCase_ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
_a = torch.sum(torch.abs(lowerCAmelCase_ ) )
_a = torch.mean(torch.abs(lowerCAmelCase_ ) )
assert abs(result_sum.item() - 1_1_5_3.1_8_3_3 ) < 1e-2
assert abs(result_mean.item() - 0.5_0_0_5 ) < 1e-3
def __lowerCAmelCase ( self : List[str] ) -> str:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**lowerCAmelCase_ )
_a = len(lowerCAmelCase_ )
_a = self.dummy_model()
_a = self.dummy_sample_deter
_a = torch.manual_seed(0 )
for t in reversed(range(lowerCAmelCase_ ) ):
# 1. predict noise residual
_a = model(lowerCAmelCase_ , lowerCAmelCase_ )
# 2. predict previous mean of sample x_t-1
_a = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ ).prev_sample
_a = pred_prev_sample
_a = torch.sum(torch.abs(lowerCAmelCase_ ) )
_a = torch.mean(torch.abs(lowerCAmelCase_ ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
def __lowerCAmelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config(prediction_type='''v_prediction''' )
_a = scheduler_class(**lowerCAmelCase_ )
_a = len(lowerCAmelCase_ )
_a = self.dummy_model()
_a = self.dummy_sample_deter
_a = torch.manual_seed(0 )
for t in reversed(range(lowerCAmelCase_ ) ):
# 1. predict noise residual
_a = model(lowerCAmelCase_ , lowerCAmelCase_ )
# 2. predict previous mean of sample x_t-1
_a = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ ).prev_sample
_a = pred_prev_sample
_a = torch.sum(torch.abs(lowerCAmelCase_ ) )
_a = torch.mean(torch.abs(lowerCAmelCase_ ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
def __lowerCAmelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**lowerCAmelCase_ )
_a = [1_00, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=lowerCAmelCase_ )
_a = scheduler.timesteps
for i, timestep in enumerate(lowerCAmelCase_ ):
if i == len(lowerCAmelCase_ ) - 1:
_a = -1
else:
_a = timesteps[i + 1]
_a = scheduler.previous_timestep(lowerCAmelCase_ )
_a = prev_t.item()
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def __lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**lowerCAmelCase_ )
_a = [1_00, 87, 50, 51, 0]
with self.assertRaises(lowerCAmelCase_ , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=lowerCAmelCase_ )
def __lowerCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**lowerCAmelCase_ )
_a = [1_00, 87, 50, 1, 0]
_a = len(lowerCAmelCase_ )
with self.assertRaises(lowerCAmelCase_ , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=lowerCAmelCase_ , timesteps=lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**lowerCAmelCase_ )
_a = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            lowerCAmelCase_ , msg=f'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=lowerCAmelCase_ )
| 179 |
'''simple docstring'''
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 179 | 1 |
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    query = '%20'.join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
print("Googling.....")
    url = f"""https://www.google.com/search?q={query}&num=100"""
    res = requests.get(
url,
headers={"User-Agent": str(UserAgent().random)},
)
try:
        link = (
BeautifulSoup(res.text, "html.parser")
.find("div", attrs={"class": "yuRUbf"})
.find("a")
.get("href")
)
except AttributeError:
        link = parse_qs(
BeautifulSoup(res.text, "html.parser")
.find("div", attrs={"class": "kCrYT"})
.find("a")
.get("href")
)['url'][0]
webbrowser.open(link)
| 150 |
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class TFLayoutLMvaModelTester :
def __init__( self , __a , __a=2 , __a=3 , __a=4 , __a=2 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=36 , __a=2 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=512 , __a=16 , __a=2 , __a=0.02 , __a=6 , __a=6 , __a=3 , __a=4 , __a=None , __a=1000 , ):
'''simple docstring'''
__a : Optional[Any] = parent
__a : int = batch_size
__a : Any = num_channels
__a : Optional[int] = image_size
__a : Dict = patch_size
__a : int = is_training
__a : Union[str, Any] = use_input_mask
__a : Optional[int] = use_token_type_ids
__a : Dict = use_labels
__a : str = vocab_size
__a : List[Any] = hidden_size
__a : Union[str, Any] = num_hidden_layers
__a : str = num_attention_heads
__a : Union[str, Any] = intermediate_size
__a : Any = hidden_act
__a : List[str] = hidden_dropout_prob
__a : List[str] = attention_probs_dropout_prob
__a : List[Any] = max_position_embeddings
__a : Tuple = type_vocab_size
__a : Any = type_sequence_label_size
__a : Optional[int] = initializer_range
__a : Any = coordinate_size
__a : List[Any] = shape_size
__a : Optional[int] = num_labels
__a : Dict = num_choices
__a : Union[str, Any] = scope
__a : Union[str, Any] = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
__a : Optional[int] = text_seq_length
__a : Any = (image_size // patch_size) ** 2 + 1
__a : Dict = self.text_seq_length + self.image_seq_length
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
__a : Tuple = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
__a : Any = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
__a : List[Any] = bbox[i, j, 3]
__a : Tuple = bbox[i, j, 1]
__a : str = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
__a : int = bbox[i, j, 2]
__a : Dict = bbox[i, j, 0]
__a : int = tmp_coordinate
__a : Optional[int] = tf.constant(__a )
__a : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a : str = None
if self.use_input_mask:
__a : Optional[Any] = random_attention_mask([self.batch_size, self.text_seq_length] )
__a : str = None
if self.use_token_type_ids:
__a : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
__a : Optional[Any] = None
__a : Optional[int] = None
if self.use_labels:
__a : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a : Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
__a : int = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : Dict = TFLayoutLMvaModel(config=__a )
# text + image
__a : List[Any] = model(__a , pixel_values=__a , training=__a )
__a : Any = model(
__a , bbox=__a , pixel_values=__a , attention_mask=__a , token_type_ids=__a , training=__a , )
__a : Optional[int] = model(__a , bbox=__a , pixel_values=__a , training=__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
__a : Any = model(__a , training=__a )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
__a : str = model({'pixel_values': pixel_values} , training=__a )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : Any = self.num_labels
__a : Dict = TFLayoutLMvaForSequenceClassification(config=__a )
__a : List[str] = model(
__a , bbox=__a , pixel_values=__a , attention_mask=__a , token_type_ids=__a , labels=__a , training=__a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : str = self.num_labels
__a : Optional[Any] = TFLayoutLMvaForTokenClassification(config=__a )
__a : List[str] = model(
__a , bbox=__a , pixel_values=__a , attention_mask=__a , token_type_ids=__a , labels=__a , training=__a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : List[Any] = 2
__a : Any = TFLayoutLMvaForQuestionAnswering(config=__a )
__a : Any = model(
__a , bbox=__a , pixel_values=__a , attention_mask=__a , token_type_ids=__a , start_positions=__a , end_positions=__a , training=__a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = self.prepare_config_and_inputs()
        __a , __a , __a , __a , __a , __a , __a , __a = config_and_inputs
__a : Any = {
'input_ids': input_ids,
'bbox': bbox,
'pixel_values': pixel_values,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_tf
class __UpperCamelCase ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel}
if is_tf_available()
else {}
)
A_ = False
A_ = False
A_ = False
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a ):
'''simple docstring'''
return True
def __UpperCAmelCase ( self , __a , __a , __a=False ):
'''simple docstring'''
__a : str = copy.deepcopy(__a )
if model_class in get_values(__a ):
__a : str = {
k: tf.tile(tf.expand_dims(__a , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(__a , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(__a ):
__a : Optional[int] = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(__a ):
__a : int = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
__a : Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(__a ):
__a : Any = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(__a ):
__a : Union[str, Any] = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
return inputs_dict
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = TFLayoutLMvaModelTester(self )
__a : Optional[int] = ConfigTester(self , config_class=__a , hidden_size=37 )
def __UpperCAmelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a , __a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Dict = model_class(__a )
if getattr(__a , 'hf_compute_loss' , __a ):
# The number of elements in the loss should be the same as the number of elements in the label
__a : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , __a , return_labels=__a )
__a : str = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=__a )[0]
]
__a : Dict = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
__a : int = self._prepare_for_class(inputs_dict.copy() , __a , return_labels=__a )
__a : Dict = prepared_for_class.pop('input_ids' )
__a : Tuple = model(__a , **__a )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
__a : int = self._prepare_for_class(inputs_dict.copy() , __a , return_labels=__a )
__a : str = prepared_for_class.pop('input_ids' )
if "labels" in prepared_for_class:
__a : Union[str, Any] = prepared_for_class['labels'].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
__a : List[Any] = -100
__a : List[str] = tf.convert_to_tensor(__a )
__a : Any = model(__a , **__a )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
__a : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , __a , return_labels=__a )
__a : str = model(__a )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
__a : Tuple = self._prepare_for_class(inputs_dict.copy() , __a , return_labels=__a )
# Get keys that were added with the _prepare_for_class function
__a : Dict = prepared_for_class.keys() - inputs_dict.keys()
__a : Any = inspect.signature(model.call ).parameters
__a : str = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
__a : List[Any] = {0: 'input_ids'}
for label_key in label_keys:
__a : List[Any] = signature_names.index(__a )
__a : Union[str, Any] = label_key
__a : List[str] = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
__a : Union[str, Any] = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
__a : Optional[Any] = prepared_for_class[value]
__a : str = tuple(__a )
# Send to model
__a : Tuple = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def __UpperCAmelCase ( self ):
'''simple docstring'''
        __a , __a , __a , __a , __a , __a , __a , __a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(__a , __a , __a , __a , __a , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
        __a , __a , __a , __a , __a , __a , __a , __a = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__a : Any = type
self.model_tester.create_and_check_model(__a , __a , __a , __a , __a , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
        __a , __a , __a , __a , __a , __a , __a , __a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
__a , __a , __a , __a , __a , __a , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
        __a , __a , __a , __a , __a , __a , __a , __a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
__a , __a , __a , __a , __a , __a , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
        __a , __a , __a , __a , __a , __a , __a , __a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
__a , __a , __a , __a , __a , __a , __a )
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a : List[Any] = TFLayoutLMvaModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def lowerCamelCase ():
__a : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def __UpperCAmelCase ( self ):
'''simple docstring'''
return LayoutLMvaImageProcessor(apply_ocr=__a ) if is_vision_available() else None
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' )
__a : Tuple = self.default_image_processor
__a : List[Any] = prepare_img()
__a : int = image_processor(images=__a , return_tensors='tf' ).pixel_values
__a : Union[str, Any] = tf.constant([[1, 2]] )
__a : Optional[Any] = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
__a : Tuple = model(input_ids=__a , bbox=__a , pixel_values=__a , training=__a )
# verify the logits
__a : List[Any] = (1, 199, 768)
self.assertEqual(outputs.last_hidden_state.shape , __a )
__a : Optional[Any] = tf.constant(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , __a , atol=1E-4 ) )
| 27 | 0 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
lowercase__ : str = '''pt'''
elif is_tf_available():
lowercase__ : Optional[Any] = '''tf'''
else:
lowercase__ : Optional[Any] = '''jax'''
class SCREAMING_SNAKE_CASE (TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
super().setUp()
        tokenizer = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname)
@cached_property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver')
def SCREAMING_SNAKE_CASE ( self , **_UpperCAmelCase):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase=20 , _UpperCAmelCase=5):
'''simple docstring'''
__A : Dict = []
for i in range(len(_UpperCAmelCase)):
try:
__A : Optional[int] = tokenizer.decode([i] , clean_up_tokenization_spaces=_UpperCAmelCase)
except UnicodeDecodeError:
pass
toks.append((i, tok))
__A : Tuple = list(filter(lambda _UpperCAmelCase: re.match(R'^[ a-zA-Z]+$' , t[1]) , _UpperCAmelCase))
__A : Tuple = list(filter(lambda _UpperCAmelCase: [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_UpperCAmelCase) , _UpperCAmelCase))
if max_length is not None and len(_UpperCAmelCase) > max_length:
__A : Any = toks[:max_length]
if min_length is not None and len(_UpperCAmelCase) < min_length and len(_UpperCAmelCase) > 0:
while len(_UpperCAmelCase) < min_length:
__A : Dict = toks + toks
# toks_str = [t[1] for t in toks]
__A : Optional[int] = [t[0] for t in toks]
# Ensure consistency
__A : Optional[Any] = tokenizer.decode(_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase)
if " " not in output_txt and len(_UpperCAmelCase) > 1:
__A : List[Any] = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_UpperCAmelCase)
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_UpperCAmelCase)
)
if with_prefix_space:
__A : str = ' ' + output_txt
__A : int = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase)
return output_txt, output_ids
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = self.perceiver_tokenizer
__A : int = 'Unicode €.'
__A : Any = tokenizer(_UpperCAmelCase)
__A : Optional[Any] = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
self.assertEqual(encoded['input_ids'] , _UpperCAmelCase)
# decoding
__A : Union[str, Any] = tokenizer.decode(_UpperCAmelCase)
self.assertEqual(_UpperCAmelCase , '[CLS]Unicode €.[SEP]')
__A : Optional[int] = tokenizer('e è é ê ë')
__A : List[str] = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
self.assertEqual(encoded['input_ids'] , _UpperCAmelCase)
# decoding
__A : List[Any] = tokenizer.decode(_UpperCAmelCase)
self.assertEqual(_UpperCAmelCase , '[CLS]e è é ê ë[SEP]')
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë')) , '[CLS]e è é ê ë[SEP]')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = self.perceiver_tokenizer
__A : Optional[int] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
__A : Optional[int] = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
# fmt: on
__A : int = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase)
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase)
if FRAMEWORK != "jax":
__A : str = list(batch.input_ids.numpy()[0])
else:
__A : Union[str, Any] = list(batch.input_ids.tolist()[0])
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase)
self.assertEqual((2, 38) , batch.input_ids.shape)
self.assertEqual((2, 38) , batch.attention_mask.shape)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : str = self.perceiver_tokenizer
__A : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
__A : int = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase)
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , _UpperCAmelCase)
self.assertIn('attention_mask' , _UpperCAmelCase)
self.assertNotIn('decoder_input_ids' , _UpperCAmelCase)
self.assertNotIn('decoder_attention_mask' , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[Any] = self.perceiver_tokenizer
__A : Optional[int] = [
'Summary of the text.',
'Another summary.',
]
__A : str = tokenizer(
text_target=_UpperCAmelCase , max_length=32 , padding='max_length' , truncation=_UpperCAmelCase , return_tensors=_UpperCAmelCase)
self.assertEqual(32 , targets['input_ids'].shape[1])
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}'):
self.assertNotEqual(tokenizer.model_max_length , 42)
# Now let's start the test
__A : Any = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}'):
# Isolate this from the other tests because we save additional tokens/etc
__A : Any = tempfile.mkdtemp()
__A : Any = ' He is very happy, UNwant\u00E9d,running'
__A : Union[str, Any] = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase)
tokenizer.save_pretrained(_UpperCAmelCase)
__A : int = tokenizer.__class__.from_pretrained(_UpperCAmelCase)
__A : Optional[int] = after_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase)
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase)
shutil.rmtree(_UpperCAmelCase)
__A : List[str] = self.get_tokenizers(model_max_length=42)
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}'):
# Isolate this from the other tests because we save additional tokens/etc
__A : str = tempfile.mkdtemp()
__A : List[Any] = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'])
__A : Optional[Any] = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token')
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens})
__A : Dict = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase)
tokenizer.save_pretrained(_UpperCAmelCase)
__A : Tuple = tokenizer.__class__.from_pretrained(_UpperCAmelCase)
__A : Dict = after_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase)
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase)
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens)
self.assertEqual(after_tokenizer.model_max_length , 42)
__A : int = tokenizer.__class__.from_pretrained(_UpperCAmelCase , model_max_length=43)
self.assertEqual(tokenizer.model_max_length , 43)
shutil.rmtree(_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_UpperCAmelCase)
with open(os.path.join(_UpperCAmelCase , 'special_tokens_map.json') , encoding='utf-8') as json_file:
__A : str = json.load(_UpperCAmelCase)
with open(os.path.join(_UpperCAmelCase , 'tokenizer_config.json') , encoding='utf-8') as json_file:
__A : int = json.load(_UpperCAmelCase)
__A : Dict = [F'<extra_id_{i}>' for i in range(125)]
__A : Tuple = added_tokens_extra_ids + [
'an_additional_special_token'
]
__A : Optional[Any] = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(_UpperCAmelCase , 'special_tokens_map.json') , 'w' , encoding='utf-8') as outfile:
json.dump(_UpperCAmelCase , _UpperCAmelCase)
with open(os.path.join(_UpperCAmelCase , 'tokenizer_config.json') , 'w' , encoding='utf-8') as outfile:
json.dump(_UpperCAmelCase , _UpperCAmelCase)
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__A : Dict = tokenizer_class.from_pretrained(
_UpperCAmelCase , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens)
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'])) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__A : int = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=_UpperCAmelCase)]
__A : Any = tokenizer_class.from_pretrained(
_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens)
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'])) , )
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([178]) , '�')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Dict = self.get_tokenizers(fast=_UpperCAmelCase , do_lower_case=_UpperCAmelCase)
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}'):
__A : int = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
__A : Dict = tokenizer.convert_tokens_to_string(_UpperCAmelCase)
                __A : Dict = tokenizer.convert_tokens_to_string(_UpperCAmelCase)
                self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase)
| 190 |
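The tests above exercise Perceiver's byte-level vocabulary: every UTF-8 byte maps to byte_value + a small special-token offset, so no merges file is needed. A minimal round-trip sketch (an assumed example; requires network access to the checkpoint):

from transformers import PerceiverTokenizer

tokenizer = PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")
ids = tokenizer("Unicode €.").input_ids  # e.g. 'U' (byte 85) becomes id 91
print(tokenizer.decode(ids))             # [CLS]Unicode €.[SEP]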
'''simple docstring'''
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_inverse_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    '''vocab_size''': len(tokenizer),
    '''scale_attn_by_inverse_layer_idx''': True,
    '''reorder_and_upcast_attn''': True,
}
# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
| 190 | 1 |
def solution(max_base: int = 10 , max_power: int = 22 ) -> int:
    '''simple docstring'''
    bases = range(1 , max_base )
    powers = range(1 , max_power )
    return sum(
        1 for power in powers for base in bases if len(str(base**power ) ) == power )
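# Project Euler 63: counts n-digit positive integers that are also an n-th power.
# base < 10 suffices because 10**n always has n + 1 digits, and power < 22 suffices
# because 9**22 already has only 21 digits (and the gap only grows from there).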
if __name__ == "__main__":
print(f'{solution(10, 22) = }')
| 244 |
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class __A( SchedulerCommonTest ):
"""simple docstring"""
    scheduler_classes = (CMStochasticIterativeScheduler,)
SCREAMING_SNAKE_CASE__ = 10
    def get_scheduler_config(self , **SCREAMING_SNAKE_CASE_ ):
        config = {
"""num_train_timesteps""": 2_01,
"""sigma_min""": 0.002,
"""sigma_max""": 80.0,
}
config.update(**SCREAMING_SNAKE_CASE_ )
return config
def UpperCAmelCase_ (self ):
UpperCamelCase__ = 10
UpperCamelCase__ = self.get_scheduler_config()
UpperCamelCase__ = self.scheduler_classes[0](**SCREAMING_SNAKE_CASE_ )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = scheduler.timesteps[0]
UpperCamelCase__ = scheduler.timesteps[1]
UpperCamelCase__ = self.dummy_sample
UpperCamelCase__ = 0.1 * sample
UpperCamelCase__ = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).prev_sample
UpperCamelCase__ = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCAmelCase_ (self ):
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self ):
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.scheduler_classes[0]
UpperCamelCase__ = self.get_scheduler_config()
UpperCamelCase__ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = 1
scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = scheduler.timesteps
UpperCamelCase__ = torch.manual_seed(0 )
UpperCamelCase__ = self.dummy_model()
UpperCamelCase__ = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(SCREAMING_SNAKE_CASE_ ):
# 1. scale model input
UpperCamelCase__ = scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# 2. predict noise residual
UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# 3. predict previous sample x_t-1
UpperCamelCase__ = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ ).prev_sample
UpperCamelCase__ = pred_prev_sample
UpperCamelCase__ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase__ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 192.7614 ) < 1E-2
assert abs(result_mean.item() - 0.2510 ) < 1E-3
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.scheduler_classes[0]
UpperCamelCase__ = self.get_scheduler_config()
UpperCamelCase__ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = [1_06, 0]
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = scheduler.timesteps
UpperCamelCase__ = torch.manual_seed(0 )
UpperCamelCase__ = self.dummy_model()
UpperCamelCase__ = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
UpperCamelCase__ = scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# 2. predict noise residual
UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# 3. predict previous sample x_t-1
UpperCamelCase__ = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ ).prev_sample
UpperCamelCase__ = pred_prev_sample
UpperCamelCase__ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase__ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 347.6357 ) < 1E-2
assert abs(result_mean.item() - 0.4527 ) < 1E-3
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.scheduler_classes[0]
UpperCamelCase__ = self.get_scheduler_config()
UpperCamelCase__ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = [39, 30, 12, 15, 0]
with self.assertRaises(SCREAMING_SNAKE_CASE_ , msg="""`timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.scheduler_classes[0]
UpperCamelCase__ = self.get_scheduler_config()
UpperCamelCase__ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = [39, 30, 12, 1, 0]
UpperCamelCase__ = len(SCREAMING_SNAKE_CASE_ )
with self.assertRaises(SCREAMING_SNAKE_CASE_ , msg="""Can only pass one of `num_inference_steps` or `timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=SCREAMING_SNAKE_CASE_ , timesteps=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.scheduler_classes[0]
UpperCamelCase__ = self.get_scheduler_config()
UpperCamelCase__ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            SCREAMING_SNAKE_CASE_ , msg=f"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ):
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_ )
| 244 | 1 |
'''simple docstring'''
def __UpperCamelCase ( num ):
    if not isinstance(num , int ):
        raise TypeError("only integers accepted as input" )
    else:
        num_string = str(abs(num ) )
        num_transpositions = [list(num_string ) for char in range(len(num_string ) )]
        for index in range(len(num_string ) ):
            num_transpositions[index].pop(index )
        return max(
            int("".join(list(transposition ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__("doctest").testmod()
| 357 |
'''simple docstring'''
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock( *msgs ):
    # lock on this script's own file so that concurrent ranks don't interleave prints
    with open(__file__, "r" ) as fh:
        fcntl.flock(fh, fcntl.LOCK_EX )
        try:
            print(*msgs )
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN )
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()
gpu = f"[{hostname}-{local_rank}]"
try:
    # test distributed
    dist.init_process_group("nccl")
    dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
    dist.barrier()
    # test cuda is available and can allocate memory
    torch.cuda.is_available()
    torch.ones(1).cuda(local_rank)
    # global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(f"{gpu} is OK (global rank: {rank}/{world_size})")
dist.barrier()
if rank == 0:
printflock(f"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")
except Exception:
printflock(f"{gpu} is broken")
raise
| 37 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
if num <= 0:
__SCREAMING_SNAKE_CASE = f"""{num}: Invalid input, please enter a positive integer."""
raise ValueError(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = [True] * (num + 1)
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = 2
__SCREAMING_SNAKE_CASE = int(math.sqrt(lowerCAmelCase_ ) )
while start <= end:
# If start is a prime
if sieve[start] is True:
prime.append(lowerCAmelCase_ )
# Set multiples of start be False
for i in range(start * start , num + 1 , lowerCAmelCase_ ):
if sieve[i] is True:
__SCREAMING_SNAKE_CASE = False
start += 1
for j in range(end + 1 , num + 1 ):
        if sieve[j]:
prime.append(lowerCAmelCase_ )
return prime
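# Sanity check for the sieve (informal): prime_sieve(10) -> [2, 3, 5, 7]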
if __name__ == "__main__":
print(prime_sieve(int(input('''Enter a positive integer: ''').strip())))
| 54 |
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
__UpperCamelCase : Tuple = '''%20'''.join(argv[1:]) if len(argv) > 1 else quote(str(input('''Search: ''')))
print('''Googling.....''')
__UpperCamelCase : Optional[int] = F'''https://www.google.com/search?q={query}&num=100'''
__UpperCamelCase : Optional[Any] = requests.get(
url,
headers={'''User-Agent''': str(UserAgent().random)},
)
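    # Google serves different result markup depending on the client: the regular layout
    # wraps each result in a `yuRUbf` div, while the lightweight fallback layout uses
    # `kCrYT` divs whose links carry the real target inside a redirect query string.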
try:
__UpperCamelCase : Union[str, Any] = (
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''yuRUbf'''})
.find('''a''')
.get('''href''')
)
except AttributeError:
__UpperCamelCase : str = parse_qs(
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''kCrYT'''})
.find('''a''')
.get('''href''')
)['''url'''][0]
webbrowser.open(link)
| 106 | 0 |
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
A_ :Dict = {
'''distilbert''': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'''roberta''': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'''bert''': (BertConfig, BertForMaskedLM, BertTokenizer),
'''gpt2''': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def A ( a_ ) -> Optional[Any]:
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
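    # In short: exactly one of the MLM/CLM objectives is active, every loss weight is
    # non-negative, and at least one loss weight is strictly positive.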
def A ( a_ ,a_ ) -> int:
if args.student_type == "roberta":
__UpperCamelCase : Tuple =False
elif args.student_type == "gpt2":
__UpperCamelCase : str =False
def A ( a_ ,a_ ) -> str:
if args.student_type == "roberta":
__UpperCamelCase : Optional[int] =False
def A ( ) -> List[str]:
__UpperCamelCase : List[Any] =argparse.ArgumentParser(description='Training' )
parser.add_argument('--force' ,action='store_true' ,help='Overwrite dump_path if it already exists.' )
parser.add_argument(
'--dump_path' ,type=a_ ,required=a_ ,help='The output directory (log, checkpoints, parameters, etc.)' )
parser.add_argument(
'--data_file' ,type=a_ ,required=a_ ,help='The binarized file (tokenized + tokens_to_ids) and grouped by sequence.' ,)
parser.add_argument(
'--student_type' ,type=a_ ,choices=['distilbert', 'roberta', 'gpt2'] ,required=a_ ,help='The student type (DistilBERT, RoBERTa).' ,)
parser.add_argument('--student_config' ,type=a_ ,required=a_ ,help='Path to the student configuration.' )
parser.add_argument(
'--student_pretrained_weights' ,default=a_ ,type=a_ ,help='Load student initialization checkpoint.' )
parser.add_argument(
'--teacher_type' ,choices=['bert', 'roberta', 'gpt2'] ,required=a_ ,help='Teacher type (BERT, RoBERTa).' )
parser.add_argument('--teacher_name' ,type=a_ ,required=a_ ,help='The teacher model.' )
    parser.add_argument('--temperature' ,default=2.0 ,type=a_ ,help='Temperature for the softmax.' )
parser.add_argument(
'--alpha_ce' ,default=0.5 ,type=a_ ,help='Linear weight for the distillation loss. Must be >=0.' )
parser.add_argument(
'--alpha_mlm' ,default=0.0 ,type=a_ ,help='Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.' ,)
parser.add_argument('--alpha_clm' ,default=0.5 ,type=a_ ,help='Linear weight for the CLM loss. Must be >=0.' )
parser.add_argument('--alpha_mse' ,default=0.0 ,type=a_ ,help='Linear weight of the MSE loss. Must be >=0.' )
parser.add_argument(
'--alpha_cos' ,default=0.0 ,type=a_ ,help='Linear weight of the cosine embedding loss. Must be >=0.' )
parser.add_argument(
'--mlm' ,action='store_true' ,help='The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.' )
parser.add_argument(
'--mlm_mask_prop' ,default=0.15 ,type=a_ ,help='Proportion of tokens for which we need to make a prediction.' ,)
parser.add_argument('--word_mask' ,default=0.8 ,type=a_ ,help='Proportion of tokens to mask out.' )
parser.add_argument('--word_keep' ,default=0.1 ,type=a_ ,help='Proportion of tokens to keep.' )
parser.add_argument('--word_rand' ,default=0.1 ,type=a_ ,help='Proportion of tokens to randomly replace.' )
parser.add_argument(
'--mlm_smoothing' ,default=0.7 ,type=a_ ,help='Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).' ,)
parser.add_argument('--token_counts' ,type=a_ ,help='The token counts in the data_file for MLM.' )
parser.add_argument(
        '--restrict_ce_to_mask' ,action='store_true' ,help='If true, compute the distillation loss only on the [MLM] prediction distribution.' ,)
parser.add_argument(
'--freeze_pos_embs' ,action='store_true' ,help='Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.' ,)
parser.add_argument(
'--freeze_token_type_embds' ,action='store_true' ,help='Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.' ,)
parser.add_argument('--n_epoch' ,type=a_ ,default=3 ,help='Number of pass on the whole dataset.' )
parser.add_argument('--batch_size' ,type=a_ ,default=5 ,help='Batch size (for each process).' )
parser.add_argument(
'--group_by_size' ,action='store_false' ,help='If true, group sequences that have similar length into the same batch. Default is true.' ,)
parser.add_argument(
'--gradient_accumulation_steps' ,type=a_ ,default=50 ,help='Gradient accumulation for larger training batches.' ,)
parser.add_argument('--warmup_prop' ,default=0.05 ,type=a_ ,help='Linear warmup proportion.' )
parser.add_argument('--weight_decay' ,default=0.0 ,type=a_ ,help='Weight decay if we apply some.' )
parser.add_argument('--learning_rate' ,default=5e-4 ,type=a_ ,help='The initial learning rate for Adam.' )
parser.add_argument('--adam_epsilon' ,default=1e-6 ,type=a_ ,help='Epsilon for Adam optimizer.' )
parser.add_argument('--max_grad_norm' ,default=5.0 ,type=a_ ,help='Max gradient norm.' )
parser.add_argument('--initializer_range' ,default=0.02 ,type=a_ ,help='Random initialization range.' )
parser.add_argument(
'--fp16' ,action='store_true' ,help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' ,)
parser.add_argument(
'--fp16_opt_level' ,type=a_ ,default='O1' ,help=(
            'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. '
'See details at https://nvidia.github.io/apex/amp.html'
) ,)
parser.add_argument('--n_gpu' ,type=a_ ,default=1 ,help='Number of GPUs in the node.' )
parser.add_argument('--local_rank' ,type=a_ ,default=-1 ,help='Distributed training - Local rank' )
parser.add_argument('--seed' ,type=a_ ,default=56 ,help='Random seed' )
parser.add_argument('--log_interval' ,type=a_ ,default=500 ,help='Tensorboard logging interval.' )
parser.add_argument('--checkpoint_interval' ,type=a_ ,default=4_000 ,help='Checkpoint interval.' )
__UpperCamelCase : List[str] =parser.parse_args()
sanity_checks(a_ )
# ARGS #
init_gpu_params(a_ )
set_seed(a_ )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
                raise ValueError(
                    F'Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite'
                    ' it. Use `--force` if you want to overwrite it.' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F'Experiment will be dumped and logged in {args.dump_path}' )
# SAVE PARAMS #
logger.info(F'Param: {args}' )
with open(os.path.join(args.dump_path ,'parameters.json' ) ,'w' ) as f:
json.dump(vars(a_ ) ,a_ ,indent=4 )
git_log(args.dump_path )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase : List[Any] =MODEL_CLASSES[args.student_type]
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase : Dict =MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
__UpperCamelCase : int =teacher_tokenizer_class.from_pretrained(args.teacher_name )
__UpperCamelCase : Union[str, Any] ={}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
__UpperCamelCase : Optional[int] =tokenizer.all_special_tokens.index(a_ )
__UpperCamelCase : Dict =tokenizer.all_special_ids[idx]
logger.info(F'Special tokens {special_tok_ids}' )
__UpperCamelCase : int =special_tok_ids
__UpperCamelCase : List[Any] =tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F'Loading data from {args.data_file}' )
with open(args.data_file ,'rb' ) as fp:
__UpperCamelCase : Dict =pickle.load(a_ )
if args.mlm:
logger.info(F'Loading token counts from {args.token_counts} (already pre-computed)' )
with open(args.token_counts ,'rb' ) as fp:
__UpperCamelCase : Dict =pickle.load(a_ )
__UpperCamelCase : Dict =np.maximum(a_ ,1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
__UpperCamelCase : str =0.0 # do not predict special tokens
__UpperCamelCase : Union[str, Any] =torch.from_numpy(a_ )
else:
__UpperCamelCase : Any =None
__UpperCamelCase : Dict =LmSeqsDataset(params=a_ ,data=a_ )
logger.info('Data loader created.' )
# STUDENT #
logger.info(F'Loading student config from {args.student_config}' )
__UpperCamelCase : List[str] =student_config_class.from_pretrained(args.student_config )
__UpperCamelCase : Any =True
if args.student_pretrained_weights is not None:
logger.info(F'Loading pretrained weights from {args.student_pretrained_weights}' )
__UpperCamelCase : Optional[int] =student_model_class.from_pretrained(args.student_pretrained_weights ,config=a_ )
else:
__UpperCamelCase : Optional[Any] =student_model_class(a_ )
if args.n_gpu > 0:
student.to(F'cuda:{args.local_rank}' )
logger.info('Student loaded.' )
# TEACHER #
__UpperCamelCase : List[Any] =teacher_model_class.from_pretrained(args.teacher_name ,output_hidden_states=a_ )
if args.n_gpu > 0:
teacher.to(F'cuda:{args.local_rank}' )
logger.info(F'Teacher loaded from {args.teacher_name}.' )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(a_ ,a_ )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(a_ ,a_ )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
__UpperCamelCase : Union[str, Any] =Distiller(
params=a_ ,dataset=a_ ,token_probs=a_ ,student=a_ ,teacher=a_ )
distiller.train()
logger.info('Let\'s go get some drinks.' )
if __name__ == "__main__":
main()
| 245 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ :Union[str, Any] = logging.get_logger(__name__)
A_ :Tuple = {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/config.json''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/config.json''',
}
class __A ( a ):
"""simple docstring"""
UpperCamelCase__ : Any ="""xlnet"""
UpperCamelCase__ : Tuple =["""mems"""]
UpperCamelCase__ : Any ={
"""n_token""": """vocab_size""", # Backward compatibility
"""hidden_size""": """d_model""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , lowerCamelCase__=32000 , lowerCamelCase__=1024 , lowerCamelCase__=24 , lowerCamelCase__=16 , lowerCamelCase__=4096 , lowerCamelCase__="gelu" , lowerCamelCase__=True , lowerCamelCase__="bi" , lowerCamelCase__=0.02 , lowerCamelCase__=1E-12 , lowerCamelCase__=0.1 , lowerCamelCase__=512 , lowerCamelCase__=None , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=False , lowerCamelCase__=-1 , lowerCamelCase__=False , lowerCamelCase__="last" , lowerCamelCase__=True , lowerCamelCase__="tanh" , lowerCamelCase__=0.1 , lowerCamelCase__=5 , lowerCamelCase__=5 , lowerCamelCase__=5 , lowerCamelCase__=1 , lowerCamelCase__=2 , **lowerCamelCase__ , ):
"""simple docstring"""
__UpperCamelCase : Optional[int] =vocab_size
__UpperCamelCase : int =d_model
__UpperCamelCase : Optional[Any] =n_layer
__UpperCamelCase : str =n_head
if d_model % n_head != 0:
raise ValueError(f'\'d_model % n_head\' ({d_model % n_head}) should be equal to 0' )
if "d_head" in kwargs:
if kwargs["d_head"] != d_model // n_head:
raise ValueError(
f'`d_head` ({kwargs["d_head"]}) should be equal to `d_model // n_head` ({d_model // n_head})' )
__UpperCamelCase : Optional[Any] =d_model // n_head
__UpperCamelCase : List[Any] =ff_activation
__UpperCamelCase : Tuple =d_inner
__UpperCamelCase : List[Any] =untie_r
__UpperCamelCase : List[Any] =attn_type
__UpperCamelCase : Dict =initializer_range
__UpperCamelCase : List[str] =layer_norm_eps
__UpperCamelCase : List[str] =dropout
__UpperCamelCase : int =mem_len
__UpperCamelCase : List[Any] =reuse_len
__UpperCamelCase : Union[str, Any] =bi_data
__UpperCamelCase : Optional[Any] =clamp_len
__UpperCamelCase : Tuple =same_length
__UpperCamelCase : int =summary_type
__UpperCamelCase : Dict =summary_use_proj
__UpperCamelCase : Dict =summary_activation
__UpperCamelCase : str =summary_last_dropout
__UpperCamelCase : Dict =start_n_top
__UpperCamelCase : Optional[Any] =end_n_top
__UpperCamelCase : int =bos_token_id
__UpperCamelCase : Union[str, Any] =pad_token_id
__UpperCamelCase : Dict =eos_token_id
if "use_cache" in kwargs:
warnings.warn(
'The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`'
' instead.' , lowerCamelCase__ , )
__UpperCamelCase : Dict =kwargs['use_cache']
__UpperCamelCase : Optional[int] =use_mems_eval
__UpperCamelCase : Any =use_mems_train
super().__init__(pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , **lowerCamelCase__ )
@property
def __lowercase ( self ):
"""simple docstring"""
logger.info(f'The model {self.model_type} is one of the few models that has no sequence length limit.' )
return -1
@max_position_embeddings.setter
def __lowercase ( self , lowerCamelCase__ ):
"""simple docstring"""
raise NotImplementedError(
f'The model {self.model_type} is one of the few models that has no sequence length limit.' )
| 245 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
A__ : Tuple ={'''tokenization_bertweet''': ['''BertweetTokenizer''']}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
A__ : int =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 70 |
"""simple docstring"""
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
UpperCAmelCase__ = ['bart.large', 'bart.large.mnli', 'bart.large.cnn', 'bart_xsum/model.pt']
UpperCAmelCase__ = {'bart.large': BartModel, 'bart.large.mnli': BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse('0.9.0'):
raise Exception('requires fairseq >= 0.9.0')
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = ' Hello world! cécé herlolip'
UpperCAmelCase__ = [
('model.classification_heads.mnli.dense.weight', 'classification_head.dense.weight'),
('model.classification_heads.mnli.dense.bias', 'classification_head.dense.bias'),
('model.classification_heads.mnli.out_proj.weight', 'classification_head.out_proj.weight'),
('model.classification_heads.mnli.out_proj.bias', 'classification_head.out_proj.bias'),
]
def _UpperCAmelCase ( __lowerCamelCase : Optional[Any] ) -> Optional[int]:
_snake_case = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''_float_tensor''',
]
for k in ignore_keys:
state_dict.pop(__lowerCamelCase , __lowerCamelCase )
def _UpperCAmelCase ( __lowerCamelCase : List[Any] , __lowerCamelCase : int , __lowerCamelCase : int ) -> int:
_snake_case = dct.pop(__lowerCamelCase )
_snake_case = val
def _UpperCAmelCase ( __lowerCamelCase : Dict ) -> str:
_snake_case = torch.load(__lowerCamelCase , map_location='''cpu''' )
_snake_case = torch.hub.load('''pytorch/fairseq''' , '''bart.large.cnn''' ).eval()
hub_interface.model.load_state_dict(sd['''model'''] )
return hub_interface
def _UpperCAmelCase ( __lowerCamelCase : Optional[int] ) -> Union[str, Any]:
_snake_case , _snake_case = emb.weight.shape
_snake_case = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
_snake_case = emb.weight.data
return lin_layer
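# The helper above builds a bias-free linear layer that reuses the token embedding's
# weight data, i.e. a weight-tied LM head.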
@torch.no_grad()
def _UpperCAmelCase ( __lowerCamelCase : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any]=None ) -> List[Any]:
if not os.path.exists(__lowerCamelCase ):
_snake_case = torch.hub.load('''pytorch/fairseq''' , __lowerCamelCase ).eval()
else:
_snake_case = load_xsum_checkpoint(__lowerCamelCase )
bart.model.upgrade_state_dict(bart.model.state_dict() )
if hf_checkpoint_name is None:
_snake_case = checkpoint_path.replace('''.''' , '''-''' )
_snake_case = BartConfig.from_pretrained(__lowerCamelCase )
_snake_case = bart.encode(__lowerCamelCase ).unsqueeze(0 )
_snake_case = BartTokenizer.from_pretrained(__lowerCamelCase ).encode(__lowerCamelCase , return_tensors='''pt''' ).unsqueeze(0 )
if not torch.eq(__lowerCamelCase , __lowerCamelCase ).all():
raise ValueError(
f'''converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}''' )
if checkpoint_path == "bart.large.mnli":
_snake_case = bart.state_dict()
remove_ignore_keys_(__lowerCamelCase )
_snake_case = state_dict['''model.decoder.embed_tokens.weight''']
for src, dest in mnli_rename_keys:
rename_key(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
_snake_case = BartForSequenceClassification(__lowerCamelCase ).eval()
model.load_state_dict(__lowerCamelCase )
_snake_case = bart.predict('''mnli''' , __lowerCamelCase , return_logits=__lowerCamelCase )
_snake_case = model(__lowerCamelCase )[0] # logits
else: # no classification heads to worry about
_snake_case = bart.model.state_dict()
remove_ignore_keys_(__lowerCamelCase )
_snake_case = state_dict['''decoder.embed_tokens.weight''']
_snake_case = bart.extract_features(__lowerCamelCase )
if hf_checkpoint_name == "facebook/bart-large":
_snake_case = BartModel(__lowerCamelCase ).eval()
model.load_state_dict(__lowerCamelCase )
_snake_case = model(__lowerCamelCase ).model[0]
else:
_snake_case = BartForConditionalGeneration(__lowerCamelCase ).eval() # an existing summarization ckpt
model.model.load_state_dict(__lowerCamelCase )
if hasattr(__lowerCamelCase , '''lm_head''' ):
_snake_case = make_linear_from_emb(model.model.shared )
_snake_case = model.model(__lowerCamelCase )[0]
# Check results
if fairseq_output.shape != new_model_outputs.shape:
raise ValueError(
            f'''`fairseq_output` shape and `new_model_outputs` shape are different: {fairseq_output.shape} != {new_model_outputs.shape}''' )
if (fairseq_output != new_model_outputs).any().item():
raise ValueError('''Some values in `fairseq_output` are different from `new_model_outputs`''' )
Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase )
model.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
)
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--hf_config', default=None, type=str, help='Which huggingface architecture to use: bart-large-xsum'
)
UpperCAmelCase__ = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
| 288 | 0 |
UpperCamelCase = 'Tobias Carryer'
from time import time
class __lowerCamelCase :
"""simple docstring"""
def __init__( self : Any , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Any=int(time() ) ) -> List[Any]: # noqa: B008
lowerCAmelCase__ = multiplier
lowerCAmelCase__ = increment
lowerCAmelCase__ = modulo
lowerCAmelCase__ = seed
def a ( self : Any ) -> Any:
lowerCAmelCase__ = (self.multiplier * self.seed + self.increment) % self.modulo
return self.seed
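# The demo below uses the classic "Numerical Recipes" LCG parameters:
# multiplier 1_664_525, increment 1_013_904_223, modulus 2**32 (written as 2 << 31).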
if __name__ == "__main__":
# Show the LCG in action.
    UpperCamelCase = LinearCongruentialGenerator(1_664_525, 1_013_904_223, 2 << 31)
while True:
print(lcg.next_number())
| 366 |
from statistics import mean, stdev
def _A ( lowerCAmelCase_ : list , lowerCAmelCase_ : int = 3 ):
"""simple docstring"""
lowerCAmelCase__ = min(lowerCAmelCase_ )
lowerCAmelCase__ = max(lowerCAmelCase_ )
# normalize data
return [round((x - x_min) / (x_max - x_min) , lowerCAmelCase_ ) for x in data]
def _A ( lowerCAmelCase_ : list , lowerCAmelCase_ : int = 3 ):
"""simple docstring"""
lowerCAmelCase__ = mean(lowerCAmelCase_ )
lowerCAmelCase__ = stdev(lowerCAmelCase_ )
# standardize data
    return [round((x - mu) / sigma , lowerCAmelCase_ ) for x in data]
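# Worked example: for data [2, 4, 6], min-max normalization gives [0.0, 0.5, 1.0],
# while z-score standardization gives [-1.0, 0.0, 1.0] (mean 4, sample stdev 2).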
| 221 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a__ : Tuple =logging.get_logger(__name__)
a__ : Union[str, Any] ={'''vocab_file''': '''sentencepiece.model'''}
a__ : Dict ={
'''vocab_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
},
}
a__ : Optional[Any] ={
'''google/rembert''': 256,
}
class snake_case ( __a ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple =VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : Any =PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : List[str] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Optional[Any] , __A : List[str] , __A : str=False , __A : List[Any]=True , __A : Optional[int]=True , __A : int="[CLS]" , __A : int="[SEP]" , __A : List[str]="[UNK]" , __A : str="[SEP]" , __A : Any="[PAD]" , __A : int="[CLS]" , __A : List[Any]="[MASK]" , **__A : str , ):
super().__init__(
do_lower_case=_A , remove_space=_A , keep_accents=_A , bos_token=_A , eos_token=_A , unk_token=_A , sep_token=_A , pad_token=_A , cls_token=_A , mask_token=_A , **_A , )
__UpperCamelCase = do_lower_case
__UpperCamelCase = remove_space
__UpperCamelCase = keep_accents
__UpperCamelCase = vocab_file
__UpperCamelCase = spm.SentencePieceProcessor()
self.sp_model.Load(_A )
@property
def _lowerCamelCase ( self : Optional[int] ):
return len(self.sp_model )
def _lowerCamelCase ( self : Optional[int] ):
__UpperCamelCase = {self.convert_ids_to_tokens(_A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Tuple ):
__UpperCamelCase = self.__dict__.copy()
__UpperCamelCase = None
return state
def __setstate__( self : Tuple , __A : Any ):
__UpperCamelCase = d
__UpperCamelCase = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file )
def _lowerCamelCase ( self : Optional[int] , __A : str , __A : str=False ):
__UpperCamelCase = self.sp_model.EncodeAsPieces(_A )
return pieces
def _lowerCamelCase ( self : Dict , __A : Optional[int] ):
return self.sp_model.PieceToId(_A )
def _lowerCamelCase ( self : Any , __A : Union[str, Any] ):
return self.sp_model.IdToPiece(_A )
def _lowerCamelCase ( self : Optional[Any] , __A : Any ):
__UpperCamelCase = self.sp_model.decode_pieces(_A )
return out_string
def _lowerCamelCase ( self : Optional[int] , __A : Union[str, Any] , __A : str = None ):
__UpperCamelCase = [self.sep_token_id]
__UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _lowerCamelCase ( self : Tuple , __A : Optional[int] , __A : Dict = None , __A : Tuple = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_A )) + [1] + ([0] * len(_A )) + [1]
return [1] + ([0] * len(_A )) + [1]
def _lowerCamelCase ( self : List[Any] , __A : int , __A : List[Any] = None ):
__UpperCamelCase = [self.sep_token_id]
__UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _lowerCamelCase ( self : Optional[int] , __A : Optional[int] , __A : List[Any] = None ):
if not os.path.isdir(_A ):
logger.error('Vocabulary path ({}) should be a directory'.format(_A ) )
return
__UpperCamelCase = os.path.join(
_A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ):
copyfile(self.vocab_file , _A )
return (out_vocab_file,)
| 53 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def UpperCamelCase ( _lowerCAmelCase : List[str] ) -> Any:
_UpperCAmelCase : List[str] = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""_float_tensor""",
"""decoder.output_projection.weight""",
]
for k in ignore_keys:
state_dict.pop(_lowerCAmelCase, _lowerCAmelCase )
def UpperCamelCase ( _lowerCAmelCase : Optional[int] ) -> Optional[int]:
_UpperCAmelCase , _UpperCAmelCase : str = emb.weight.shape
_UpperCAmelCase : Union[str, Any] = nn.Linear(_lowerCAmelCase, _lowerCAmelCase, bias=_lowerCAmelCase )
_UpperCAmelCase : List[str] = emb.weight.data
return lin_layer
def UpperCamelCase ( _lowerCAmelCase : str, _lowerCAmelCase : Dict="facebook/mbart-large-en-ro", _lowerCAmelCase : Optional[int]=False, _lowerCAmelCase : Union[str, Any]=False ) -> Optional[Any]:
_UpperCAmelCase : List[Any] = torch.load(_lowerCAmelCase, map_location="""cpu""" )["""model"""]
remove_ignore_keys_(_lowerCAmelCase )
_UpperCAmelCase : int = state_dict["""encoder.embed_tokens.weight"""].shape[0]
_UpperCAmelCase : str = MBartConfig.from_pretrained(_lowerCAmelCase, vocab_size=_lowerCAmelCase )
if mbart_aa and finetuned:
_UpperCAmelCase : Any = """relu"""
_UpperCAmelCase : Union[str, Any] = state_dict["""decoder.embed_tokens.weight"""]
_UpperCAmelCase : Any = MBartForConditionalGeneration(_lowerCAmelCase )
model.model.load_state_dict(_lowerCAmelCase )
if finetuned:
_UpperCAmelCase : List[Any] = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
lowerCamelCase__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''',
default='''facebook/mbart-large-cc25''',
type=str,
help='''Which huggingface architecture to use: mbart-large''',
)
    parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is an mBART-50 checkpoint''')
parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''')
lowerCamelCase__ : str = parser.parse_args()
lowerCamelCase__ : Optional[int] = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
| 246 | 0 |
'''simple docstring'''
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class UpperCAmelCase ( UpperCAmelCase__ ):
def __init__( self : Optional[int] , __snake_case : List[str] , __snake_case : Dict , __snake_case : Optional[int] , __snake_case : int , ) -> List[Any]:
super().__init__()
_lowerCAmelCase = value_function
_lowerCAmelCase = unet
_lowerCAmelCase = scheduler
_lowerCAmelCase = env
_lowerCAmelCase = env.get_dataset()
_lowerCAmelCase = {}
for key in self.data.keys():
try:
_lowerCAmelCase = self.data[key].mean()
except: # noqa: E722
pass
_lowerCAmelCase = {}
for key in self.data.keys():
try:
_lowerCAmelCase = self.data[key].std()
except: # noqa: E722
pass
_lowerCAmelCase = env.observation_space.shape[0]
_lowerCAmelCase = env.action_space.shape[0]
def lowercase__ ( self : Optional[int] , __snake_case : Optional[int] , __snake_case : Optional[Any] ) -> Tuple:
return (x_in - self.means[key]) / self.stds[key]
def lowercase__ ( self : Optional[Any] , __snake_case : List[str] , __snake_case : List[Any] ) -> Union[str, Any]:
return x_in * self.stds[key] + self.means[key]
def lowercase__ ( self : List[Any] , __snake_case : Union[str, Any] ) -> Optional[Any]:
if type(_SCREAMING_SNAKE_CASE ) is dict:
return {k: self.to_torch(_SCREAMING_SNAKE_CASE ) for k, v in x_in.items()}
elif torch.is_tensor(_SCREAMING_SNAKE_CASE ):
return x_in.to(self.unet.device )
return torch.tensor(_SCREAMING_SNAKE_CASE , device=self.unet.device )
def lowercase__ ( self : List[str] , __snake_case : Union[str, Any] , __snake_case : List[Any] , __snake_case : List[Any] ) -> str:
for key, val in cond.items():
_lowerCAmelCase = val.clone()
return x_in
def lowercase__ ( self : List[str] , __snake_case : int , __snake_case : List[Any] , __snake_case : List[str] , __snake_case : int ) -> int:
_lowerCAmelCase = x.shape[0]
_lowerCAmelCase = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
_lowerCAmelCase = torch.full((batch_size,) , _SCREAMING_SNAKE_CASE , device=self.unet.device , dtype=torch.long )
for _ in range(_SCREAMING_SNAKE_CASE ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
_lowerCAmelCase = self.value_function(x.permute(0 , 2 , 1 ) , _SCREAMING_SNAKE_CASE ).sample
_lowerCAmelCase = torch.autograd.grad([y.sum()] , [x] )[0]
_lowerCAmelCase = self.scheduler._get_variance(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = torch.exp(0.5 * posterior_variance )
_lowerCAmelCase = model_std * grad
_lowerCAmelCase = 0
_lowerCAmelCase = x.detach()
_lowerCAmelCase = x + scale * grad
_lowerCAmelCase = self.reset_xa(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.action_dim )
_lowerCAmelCase = self.unet(x.permute(0 , 2 , 1 ) , _SCREAMING_SNAKE_CASE ).sample.permute(0 , 2 , 1 )
# TODO: verify deprecation of this kwarg
_lowerCAmelCase = self.scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , predict_epsilon=_SCREAMING_SNAKE_CASE )["""prev_sample"""]
# apply conditions to the trajectory (set the initial state)
_lowerCAmelCase = self.reset_xa(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.action_dim )
_lowerCAmelCase = self.to_torch(_SCREAMING_SNAKE_CASE )
return x, y
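    # Each denoising step above first nudges the sample along the value function's
    # gradient (scaled by the scheduler's posterior std) before the regular diffusion
    # update -- classifier-guided sampling adapted to trajectory planning.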
def __call__( self : Tuple , __snake_case : List[Any] , __snake_case : Tuple=64 , __snake_case : Union[str, Any]=32 , __snake_case : Union[str, Any]=2 , __snake_case : Any=0.1 ) -> Tuple:
_lowerCAmelCase = self.normalize(_SCREAMING_SNAKE_CASE , """observations""" )
_lowerCAmelCase = obs[None].repeat(_SCREAMING_SNAKE_CASE , axis=0 )
_lowerCAmelCase = {0: self.to_torch(_SCREAMING_SNAKE_CASE )}
_lowerCAmelCase = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
_lowerCAmelCase = randn_tensor(_SCREAMING_SNAKE_CASE , device=self.unet.device )
_lowerCAmelCase = self.reset_xa(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.action_dim )
_lowerCAmelCase = self.to_torch(_SCREAMING_SNAKE_CASE )
# run the diffusion process
_lowerCAmelCase = self.run_diffusion(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# sort output trajectories by value
_lowerCAmelCase = y.argsort(0 , descending=_SCREAMING_SNAKE_CASE ).squeeze()
_lowerCAmelCase = x[sorted_idx]
_lowerCAmelCase = sorted_values[:, :, : self.action_dim]
_lowerCAmelCase = actions.detach().cpu().numpy()
_lowerCAmelCase = self.de_normalize(_SCREAMING_SNAKE_CASE , key="""actions""" )
# select the action with the highest value
if y is not None:
_lowerCAmelCase = 0
else:
# if we didn't run value guiding, select a random action
_lowerCAmelCase = np.random.randint(0 , _SCREAMING_SNAKE_CASE )
_lowerCAmelCase = denorm_actions[selected_index, 0]
return denorm_actions
| 360 |
'''simple docstring'''
def UpperCamelCase__ ( lowerCAmelCase = 4_000_000 ):
"""simple docstring"""
_lowerCAmelCase = []
_lowerCAmelCase , _lowerCAmelCase = 0, 1
while b <= n:
if b % 2 == 0:
even_fibs.append(lowerCAmelCase )
_lowerCAmelCase , _lowerCAmelCase = b, a + b
return sum(lowerCAmelCase )
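# Project Euler problem 2: with the default limit of 4_000_000 the sum of the
# even-valued Fibonacci terms is 4_613_732.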
if __name__ == "__main__":
print(F"""{solution() = }""")
| 220 | 0 |
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_snake_case = {
'''configuration_cpmant''': ['''CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CpmAntConfig'''],
'''tokenization_cpmant''': ['''CpmAntTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'''CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CpmAntForCausalLM''',
'''CpmAntModel''',
'''CpmAntPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 157 | import argparse
import os
import re
_snake_case = '''src/transformers/models/auto'''
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_snake_case = re.compile(r'''[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict''')
# re pattern that matches identifiers in mappings
_snake_case = re.compile(r'''\s*\(\s*"(\S[^"]+)"''')
def _UpperCamelCase ( snake_case__, snake_case__ = False ) -> List[Any]:
with open(snake_case__, "r", encoding="utf-8" ) as f:
__UpperCAmelCase : Dict = f.read()
__UpperCAmelCase : Optional[Any] = content.split("\n" )
__UpperCAmelCase : int = []
__UpperCAmelCase : Optional[int] = 0
while line_idx < len(snake_case__ ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
__UpperCAmelCase : str = len(re.search(r"^(\s*)\S", lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(" " * indent + "(" ):
new_lines.append(lines[line_idx] )
line_idx += 1
__UpperCAmelCase : Dict = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
__UpperCAmelCase : str = line_idx
while not lines[line_idx].startswith(" " * indent + ")" ):
line_idx += 1
blocks.append("\n".join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
__UpperCAmelCase : Dict = sorted(snake_case__, key=lambda snake_case__ : _re_identifier.search(snake_case__ ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(snake_case__, "w", encoding="utf-8" ) as f:
f.write("\n".join(snake_case__ ) )
elif "\n".join(snake_case__ ) != content:
return True
def _UpperCamelCase ( snake_case__ = False ) -> Any:
__UpperCAmelCase : str = [os.path.join(snake_case__, snake_case__ ) for f in os.listdir(snake_case__ ) if f.endswith(".py" )]
__UpperCAmelCase : Optional[Any] = [sort_auto_mapping(snake_case__, overwrite=snake_case__ ) for fname in fnames]
if not overwrite and any(snake_case__ ):
__UpperCAmelCase : List[Any] = [f for f, d in zip(snake_case__, snake_case__ ) if d]
raise ValueError(
f'''The following files have auto mappings that need sorting: {', '.join(snake_case__ )}. Run `make style` to fix'''
" this." )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
_snake_case = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
| 157 | 1 |
from __future__ import annotations
UpperCamelCase__ = 1.6021E-19 # units = C
def _a ( SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , ):
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif conductivity < 0:
raise ValueError("Conductivity cannot be negative" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative" )
elif mobility < 0:
raise ValueError("mobility cannot be negative" )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
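# The solver above rearranges the drift-conductivity relation sigma = n * e * mu,
# returning whichever of (conductivity, electron_conc, mobility) was passed in as 0.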
if __name__ == "__main__":
import doctest
doctest.testmod()
| 102 |
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
UpperCamelCase__ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class a__ :
_a : str = field(
default=snake_case__ , metadata={"""help""": """Model type selected in the list: """ + """, """.join(snake_case__ )} )
_a : str = field(
default=snake_case__ , metadata={"""help""": """The input data dir. Should contain the .json files for the SQuAD task."""} )
_a : int = field(
default=1_2_8 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
_a : int = field(
default=1_2_8 , metadata={"""help""": """When splitting up a long document into chunks, how much stride to take between chunks."""} , )
_a : int = field(
default=6_4 , metadata={
"""help""": (
"""The maximum number of tokens for the question. Questions longer than this will """
"""be truncated to this length."""
)
} , )
_a : int = field(
default=3_0 , metadata={
"""help""": (
"""The maximum length of an answer that can be generated. This is needed because the start """
"""and end predictions are not conditioned on one another."""
)
} , )
_a : bool = field(
default=snake_case__ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
_a : bool = field(
default=snake_case__ , metadata={"""help""": """If true, the SQuAD examples contain some that do not have an answer."""} )
_a : float = field(
default=0.0 , metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""} )
_a : int = field(
        default=2_0 , metadata={"""help""": """The total number of n-best predictions to generate."""} )
_a : int = field(
default=0 , metadata={
"""help""": (
"""language id of input for language-specific xlm models (see"""
""" tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"""
)
} , )
_a : int = field(default=1 , metadata={"""help""": """multiple threads for converting example to features"""} )
class a__ ( snake_case__ ):
_a : Any = """train"""
_a : Union[str, Any] = """dev"""
class a__ ( snake_case__ ):
_a : SquadDataTrainingArguments
_a : List[SquadFeatures]
_a : Split
_a : bool
def __init__( self , _A , _A , _A = None , _A = Split.train , _A = False , _A = None , _A = "pt" , ):
"""simple docstring"""
__lowerCAmelCase = args
__lowerCAmelCase = is_language_sensitive
        __lowerCAmelCase = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
if isinstance(_A , _A ):
try:
__lowerCAmelCase = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
__lowerCAmelCase = mode
# Load data features from cache or dataset file
__lowerCAmelCase = "v2" if args.version_2_with_negative else "v1"
__lowerCAmelCase = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}""" , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__lowerCAmelCase = cached_features_file + ".lock"
with FileLock(_A ):
if os.path.exists(_A ) and not args.overwrite_cache:
__lowerCAmelCase = time.time()
__lowerCAmelCase = torch.load(_A )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
__lowerCAmelCase = self.old_features["features"]
__lowerCAmelCase = self.old_features.get("dataset" , _A )
__lowerCAmelCase = self.old_features.get("examples" , _A )
logger.info(
f"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f"""Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"""
" future run" )
else:
if mode == Split.dev:
__lowerCAmelCase = self.processor.get_dev_examples(args.data_dir )
else:
__lowerCAmelCase = self.processor.get_train_examples(args.data_dir )
__lowerCAmelCase , __lowerCAmelCase = squad_convert_examples_to_features(
examples=self.examples , tokenizer=_A , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=_A , )
__lowerCAmelCase = time.time()
torch.save(
{"features": self.features, "dataset": self.dataset, "examples": self.examples} , _A , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" )
def __len__( self ):
"""simple docstring"""
return len(self.features )
def __getitem__( self , _A ):
"""simple docstring"""
__lowerCAmelCase = self.features[i]
__lowerCAmelCase = torch.tensor(feature.input_ids , dtype=torch.long )
__lowerCAmelCase = torch.tensor(feature.attention_mask , dtype=torch.long )
__lowerCAmelCase = torch.tensor(feature.token_type_ids , dtype=torch.long )
__lowerCAmelCase = torch.tensor(feature.cls_index , dtype=torch.long )
__lowerCAmelCase = torch.tensor(feature.p_mask , dtype=torch.float )
__lowerCAmelCase = torch.tensor(feature.is_impossible , dtype=torch.float )
__lowerCAmelCase = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"is_impossible": is_impossible} )
if self.is_language_sensitive:
inputs.update({"langs": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
__lowerCAmelCase = torch.tensor(feature.start_position , dtype=torch.long )
__lowerCAmelCase = torch.tensor(feature.end_position , dtype=torch.long )
inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
return inputs
| 102 | 1 |
"""simple docstring"""
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
def get_matched_characters(lowercase ,lowercase ) -> str:
_UpperCAmelCase = []
_UpperCAmelCase = min(len(_stra ) ,len(_stra ) ) // 2
for i, l in enumerate(_stra ):
_UpperCAmelCase = int(max(0 ,i - limit ) )
_UpperCAmelCase = int(min(i + limit + 1 ,len(_stra ) ) )
if l in _stra[left:right]:
matched.append(lowercase )
_UpperCAmelCase = f'''{_stra[0:_stra.index(lowercase )]} {_stra[_stra.index(lowercase ) + 1:]}'''
return "".join(lowercase )
# matching characters
_UpperCAmelCase = get_matched_characters(lowercase ,lowercase )
_UpperCAmelCase = get_matched_characters(lowercase ,lowercase )
_UpperCAmelCase = len(lowercase )
# transposition
_UpperCAmelCase = (
len([(ca, ca) for ca, ca in zip(lowercase ,lowercase ) if ca != ca] ) // 2
)
if not match_count:
_UpperCAmelCase = 0.0
else:
_UpperCAmelCase = (
1
/ 3
* (
match_count / len(lowercase )
+ match_count / len(lowercase )
+ (match_count - transpositions) / match_count
)
)
# common prefix up to 4 characters
_UpperCAmelCase = 0
for ca, ca in zip(stra[:4] ,stra[:4] ):
if ca == ca:
prefix_len += 1
else:
break
return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("""hello""", """world"""))
| 289 | """simple docstring"""
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"""b0""": efficientnet.EfficientNetBa,
"""b1""": efficientnet.EfficientNetBa,
"""b2""": efficientnet.EfficientNetBa,
"""b3""": efficientnet.EfficientNetBa,
"""b4""": efficientnet.EfficientNetBa,
"""b5""": efficientnet.EfficientNetBa,
"""b6""": efficientnet.EfficientNetBa,
"""b7""": efficientnet.EfficientNetBa,
}
UpperCAmelCase__ = {
"""b0""": {
"""hidden_dim""": 1_2_8_0,
"""width_coef""": 1.0,
"""depth_coef""": 1.0,
"""image_size""": 2_2_4,
"""dropout_rate""": 0.2,
"""dw_padding""": [],
},
"""b1""": {
"""hidden_dim""": 1_2_8_0,
"""width_coef""": 1.0,
"""depth_coef""": 1.1,
"""image_size""": 2_4_0,
"""dropout_rate""": 0.2,
"""dw_padding""": [1_6],
},
"""b2""": {
"""hidden_dim""": 1_4_0_8,
"""width_coef""": 1.1,
"""depth_coef""": 1.2,
"""image_size""": 2_6_0,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 8, 1_6],
},
"""b3""": {
"""hidden_dim""": 1_5_3_6,
"""width_coef""": 1.2,
"""depth_coef""": 1.4,
"""image_size""": 3_0_0,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 1_8],
},
"""b4""": {
"""hidden_dim""": 1_7_9_2,
"""width_coef""": 1.4,
"""depth_coef""": 1.8,
"""image_size""": 3_8_0,
"""dropout_rate""": 0.4,
"""dw_padding""": [6],
},
"""b5""": {
"""hidden_dim""": 2_0_4_8,
"""width_coef""": 1.6,
"""depth_coef""": 2.2,
"""image_size""": 4_5_6,
"""dropout_rate""": 0.4,
"""dw_padding""": [1_3, 2_7],
},
"""b6""": {
"""hidden_dim""": 2_3_0_4,
"""width_coef""": 1.8,
"""depth_coef""": 2.6,
"""image_size""": 5_2_8,
"""dropout_rate""": 0.5,
"""dw_padding""": [3_1],
},
"""b7""": {
"""hidden_dim""": 2_5_6_0,
"""width_coef""": 2.0,
"""depth_coef""": 3.1,
"""image_size""": 6_0_0,
"""dropout_rate""": 0.5,
"""dw_padding""": [1_8],
},
}
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = EfficientNetConfig()
_UpperCAmelCase = CONFIG_MAP[model_name]["""hidden_dim"""]
_UpperCAmelCase = CONFIG_MAP[model_name]["""width_coef"""]
_UpperCAmelCase = CONFIG_MAP[model_name]["""depth_coef"""]
_UpperCAmelCase = CONFIG_MAP[model_name]["""image_size"""]
_UpperCAmelCase = CONFIG_MAP[model_name]["""dropout_rate"""]
_UpperCAmelCase = CONFIG_MAP[model_name]["""dw_padding"""]
_UpperCAmelCase = """huggingface/label-files"""
_UpperCAmelCase = """imagenet-1k-id2label.json"""
_UpperCAmelCase = 10_00
_UpperCAmelCase = json.load(open(hf_hub_download(lowercase ,lowercase ,repo_type="""dataset""" ) ,"""r""" ) )
_UpperCAmelCase = {int(lowercase ): v for k, v in idalabel.items()}
_UpperCAmelCase = idalabel
_UpperCAmelCase = {v: k for k, v in idalabel.items()}
return config
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_UpperCAmelCase = Image.open(requests.get(lowercase ,stream=lowercase ).raw )
return im
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = CONFIG_MAP[model_name]["""image_size"""]
_UpperCAmelCase = EfficientNetImageProcessor(
size={"""height""": size, """width""": size} ,image_mean=[0.4_85, 0.4_56, 0.4_06] ,image_std=[0.47_85_39_44, 0.4_73_28_64, 0.47_43_41_63] ,do_center_crop=lowercase ,)
return preprocessor
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = [v.split("""_""" )[0].split("""block""" )[1] for v in original_param_names if v.startswith("""block""" )]
_UpperCAmelCase = sorted(set(lowercase ) )
_UpperCAmelCase = len(lowercase )
_UpperCAmelCase = {b: str(lowercase ) for b, i in zip(lowercase ,range(lowercase ) )}
_UpperCAmelCase = []
rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") )
rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") )
rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") )
rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") )
rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") )
for b in block_names:
_UpperCAmelCase = block_name_mapping[b]
rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") )
rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") )
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") )
rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") )
rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") )
_UpperCAmelCase = {}
for item in rename_keys:
if item[0] in original_param_names:
_UpperCAmelCase = """efficientnet.""" + item[1]
_UpperCAmelCase = """classifier.weight"""
_UpperCAmelCase = """classifier.bias"""
return key_mapping
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ):
"""simple docstring"""
for key, value in tf_params.items():
if "normalization" in key:
continue
_UpperCAmelCase = key_mapping[key]
if "_conv" in key and "kernel" in key:
_UpperCAmelCase = torch.from_numpy(lowercase ).permute(3 ,2 ,0 ,1 )
elif "depthwise_kernel" in key:
_UpperCAmelCase = torch.from_numpy(lowercase ).permute(2 ,3 ,0 ,1 )
elif "kernel" in key:
_UpperCAmelCase = torch.from_numpy(np.transpose(lowercase ) )
else:
_UpperCAmelCase = torch.from_numpy(lowercase )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(lowercase )
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    """
    Copy/paste/tweak the original EfficientNet weights into our HF structure.
    """
    # Load the original TF model
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""b0""",
type=str,
help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""hf_model""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""")
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
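
# --- Illustrative only (not part of the original script): a minimal sketch of
# how the converted checkpoint could be consumed afterwards. The folder name
# "hf_model" matches the default --pytorch_dump_folder_path above; the image
# URL is a placeholder assumption.
def _example_run_converted_model(folder="hf_model"):
    from PIL import Image
    import requests

    processor = AutoImageProcessor.from_pretrained(folder) if False else convert_image_processor("b0")  # or load saved
    from transformers import AutoImageProcessor, EfficientNetForImageClassification

    processor = AutoImageProcessor.from_pretrained(folder)
    model = EfficientNetForImageClassification.from_pretrained(folder).eval()
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    img = Image.open(requests.get(url, stream=True).raw)
    with torch.no_grad():
        logits = model(**processor(images=img, return_tensors="pt")).logits
    # Map the top logit back to a human-readable ImageNet label
    return model.config.id2label[int(logits.argmax(-1))]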
| 289 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_blenderbot_small""": [
"""BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlenderbotSmallConfig""",
"""BlenderbotSmallOnnxConfig""",
],
"""tokenization_blenderbot_small""": ["""BlenderbotSmallTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ["""BlenderbotSmallTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot_small"] = [
"""BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlenderbotSmallForCausalLM""",
"""BlenderbotSmallForConditionalGeneration""",
"""BlenderbotSmallModel""",
"""BlenderbotSmallPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
"""TFBlenderbotSmallForConditionalGeneration""",
"""TFBlenderbotSmallModel""",
"""TFBlenderbotSmallPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
"""FlaxBlenderbotSmallForConditionalGeneration""",
"""FlaxBlenderbotSmallModel""",
"""FlaxBlenderbotSmallPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
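
# --- Illustrative only (not part of the original file): the _LazyModule above
# means that importing this package is cheap, and heavy backends (torch/tf/flax)
# are only imported when an attribute is first accessed. A minimal sketch of
# that access pattern:
def _example_lazy_access():
    from transformers.models.blenderbot_small import BlenderbotSmallConfig

    # Only at this point is configuration_blenderbot_small actually imported.
    config = BlenderbotSmallConfig()
    return config.model_type  # "blenderbot-small"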
| 356 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )
        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)
        times["map identity batched"] = map(dataset, batched=True)
        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)
        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)
        times["filter"] = filter(dataset)
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
        with open(RESULTS_FILE_PATH, "wb") as f:
            f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
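
# --- Illustrative only: `get_duration` is imported from the local benchmark
# `utils` module, which is not shown here. A minimal sketch of a decorator with
# the behavior the code above relies on (returning the elapsed wall-clock time
# of the wrapped call) might look like this; the real helper may differ:
def _sketch_get_duration(func):
    import functools
    import time

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        func(*args, **kwargs)
        return time.time() - start  # seconds elapsed, used as the benchmark value

    return wrapper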
| 169 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}


class DonutSwinConfig(PretrainedConfig):
    model_type = "donut-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
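
# --- Illustrative only: instantiating the config shows how `hidden_size` is
# derived from `embed_dim` and the number of stages (embed_dim * 2 ** (n - 1)).
def _example_donut_swin_hidden_size():
    config = DonutSwinConfig(embed_dim=96, depths=[2, 2, 6, 2])
    assert config.hidden_size == 96 * 2 ** 3  # 768 for the default 4 stages
    return config.hidden_size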
| 271 |
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)

arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    "constant": get_constant_schedule,
    "constant_w_warmup": get_constant_schedule_with_warmup,
}
class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding.."
            )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss

    def create_optimizer_and_scheduler(self, num_training_steps: int):
        """
        Setup the optimizer and the learning rate scheduler.
        """
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters,
                    optim=optimizer_cls,
                    **optimizer_kwargs,
                )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")

    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler

    def _get_train_sampler(self):
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size,
                    distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),
                )
            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )

    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits

    def compute_loss(self, model, inputs):
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss

    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                **gen_kwargs,
            )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])

        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])

        return (loss, logits, labels)

    def _pad_tensors_to_max_len(self, tensor, max_length):
        # If PAD token is not defined, at least the EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id

        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}"
            )

        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
| 271 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a ( self : Optional[int] , _lowercase : List[str] , _lowercase : Union[str, Any] , _lowercase : Optional[Any] , _lowercase : List[str] , _lowercase : int , _lowercase : List[Any] , _lowercase : Dict ):
__UpperCAmelCase = TFRoFormerModel(config=_lowercase )
__UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__UpperCAmelCase = [input_ids, input_mask]
__UpperCAmelCase = model(_lowercase )
__UpperCAmelCase = model(_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a ( self : List[str] , _lowercase : int , _lowercase : List[str] , _lowercase : Optional[Any] , _lowercase : Union[str, Any] , _lowercase : Any , _lowercase : Tuple , _lowercase : Tuple ):
__UpperCAmelCase = True
__UpperCAmelCase = TFRoFormerForCausalLM(config=_lowercase )
__UpperCAmelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__UpperCAmelCase = model(_lowercase )['''logits''']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def a ( self : Optional[Any] , _lowercase : Any , _lowercase : Union[str, Any] , _lowercase : Dict , _lowercase : str , _lowercase : Optional[int] , _lowercase : Union[str, Any] , _lowercase : List[str] ):
__UpperCAmelCase = TFRoFormerForMaskedLM(config=_lowercase )
__UpperCAmelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__UpperCAmelCase = model(_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a ( self : List[str] , _lowercase : List[str] , _lowercase : int , _lowercase : Optional[int] , _lowercase : Union[str, Any] , _lowercase : List[Any] , _lowercase : Optional[Any] , _lowercase : Union[str, Any] ):
__UpperCAmelCase = self.num_labels
__UpperCAmelCase = TFRoFormerForSequenceClassification(config=_lowercase )
__UpperCAmelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__UpperCAmelCase = model(_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a ( self : Optional[Any] , _lowercase : Optional[Any] , _lowercase : int , _lowercase : str , _lowercase : List[str] , _lowercase : str , _lowercase : List[str] , _lowercase : List[str] ):
__UpperCAmelCase = self.num_choices
__UpperCAmelCase = TFRoFormerForMultipleChoice(config=_lowercase )
__UpperCAmelCase = tf.tile(tf.expand_dims(_lowercase , 1 ) , (1, self.num_choices, 1) )
__UpperCAmelCase = tf.tile(tf.expand_dims(_lowercase , 1 ) , (1, self.num_choices, 1) )
__UpperCAmelCase = tf.tile(tf.expand_dims(_lowercase , 1 ) , (1, self.num_choices, 1) )
__UpperCAmelCase = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
__UpperCAmelCase = model(_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def a ( self : Dict , _lowercase : Optional[Any] , _lowercase : List[str] , _lowercase : Optional[int] , _lowercase : int , _lowercase : Union[str, Any] , _lowercase : Optional[Any] , _lowercase : List[str] ):
__UpperCAmelCase = self.num_labels
__UpperCAmelCase = TFRoFormerForTokenClassification(config=_lowercase )
__UpperCAmelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__UpperCAmelCase = model(_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a ( self : List[str] , _lowercase : Any , _lowercase : Union[str, Any] , _lowercase : Union[str, Any] , _lowercase : str , _lowercase : str , _lowercase : Tuple , _lowercase : Tuple ):
__UpperCAmelCase = TFRoFormerForQuestionAnswering(config=_lowercase )
__UpperCAmelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__UpperCAmelCase = model(_lowercase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFRoFormerModel,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFRoFormerModel,
            "fill-mask": TFRoFormerForMaskedLM,
            "question-answering": TFRoFormerForQuestionAnswering,
            "text-classification": TFRoFormerForSequenceClassification,
            "text-generation": TFRoFormerForCausalLM,
            "token-classification": TFRoFormerForTokenClassification,
            "zero-shot": TFRoFormerForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )

    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True

        return False

    def setUp(self):
        self.model_tester = TFRoFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base")
        self.assertIsNotNone(model)
@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        # TODO Replace vocab size
        vocab_size = 50000

        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape, expected_shape)

        print(output[:, :3, :3])

        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
            [
                [
                    [-0.12053341, -1.0264901, 0.29221946],
                    [-1.5133783, 0.197433, 0.15190607],
                    [-5.0135403, -3.900256, -0.84038764],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)


@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_basic(self):
        input_ids = tf.constant([[4, 10]])
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6)

        emb = emb1(input_ids.shape)
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]]
        )

        tf.debugging.assert_near(emb, desired_weights, atol=self.tolerance)

    def test_positional_emb_weights_against_roformer(self):
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ]
        )
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512)
        emb1([2, 16, 512])
        weights = emb1.weight[:3, :5]

        tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance)


@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_apply_rotary_position_embeddings(self):
        # 2,12,16,64
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100

        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
        sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]

        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos, query_layer, key_layer
        )

        expected_query = tf.constant(
            [
                [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
                [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
                [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
                [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
                [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
                [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
            ]
        )
        expected_key = tf.constant(
            [
                [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
                [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
                [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
                [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
                [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
                [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
            ]
        )

        tf.debugging.assert_near(query_layer[0, 0, :6, :8], expected_query, atol=self.tolerance)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8], expected_key, atol=self.tolerance)
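
# --- Illustrative only: the rotary trick tested above rotates each (even,
# odd) feature pair of q and k by a position-dependent angle, so their dot
# product depends only on relative position. A minimal NumPy sketch mirroring
# the layout used by the helper (first half of the embedding is sin, second
# half is cos); details of the real TF implementation may differ:
def _sketch_apply_rotary(x, sinusoidal_pos):
    import numpy as np

    half = sinusoidal_pos.shape[-1] // 2
    sin, cos = sinusoidal_pos[..., :half], sinusoidal_pos[..., half:]
    # duplicate each angle for the (even, odd) feature pair it rotates
    sin_pos = np.stack([sin, sin], axis=-1).reshape(sinusoidal_pos.shape)
    cos_pos = np.stack([cos, cos], axis=-1).reshape(sinusoidal_pos.shape)
    # rotate_half: (x1, x2) -> (-x2, x1) for each consecutive feature pair
    rotate_half = np.stack([-x[..., 1::2], x[..., 0::2]], axis=-1).reshape(x.shape)
    return x * cos_pos + rotate_half * sin_pos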
| 86 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_xlm_roberta': [
'XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaConfig',
'XLMRobertaOnnxConfig',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta"] = ["XLMRobertaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta_fast"] = ["XLMRobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta"] = [
'XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaForCausalLM',
'XLMRobertaForMaskedLM',
'XLMRobertaForMultipleChoice',
'XLMRobertaForQuestionAnswering',
'XLMRobertaForSequenceClassification',
'XLMRobertaForTokenClassification',
'XLMRobertaModel',
'XLMRobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm_roberta"] = [
'TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMRobertaForCausalLM',
'TFXLMRobertaForMaskedLM',
'TFXLMRobertaForMultipleChoice',
'TFXLMRobertaForQuestionAnswering',
'TFXLMRobertaForSequenceClassification',
'TFXLMRobertaForTokenClassification',
'TFXLMRobertaModel',
'TFXLMRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xlm_roberta"] = [
'FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxXLMRobertaForMaskedLM',
'FlaxXLMRobertaForCausalLM',
'FlaxXLMRobertaForMultipleChoice',
'FlaxXLMRobertaForQuestionAnswering',
'FlaxXLMRobertaForSequenceClassification',
'FlaxXLMRobertaForTokenClassification',
'FlaxXLMRobertaModel',
'FlaxXLMRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
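
# --- Illustrative only (not part of the original file): every try/except
# block above follows one pattern for guarding an optional backend. A minimal
# sketch of how a new backend would be registered here; the names below are
# placeholders, not real modules:
def _example_register_optional_backend(import_structure, backend_available):
    try:
        if not backend_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass  # backend missing: simply do not expose its symbols
    else:
        import_structure["modeling_some_backend"] = ["SomeBackendModel"]
    return import_structure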
| 86 | 1 |
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "snap-research/efficientformer-l1-300": (
        "https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
    ),
}
class EfficientFormerConfig(PretrainedConfig):
    model_type = "efficientformer"

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
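
# --- Illustrative only: like any PretrainedConfig subclass, this config
# serializes to a plain dict/JSON and round-trips losslessly, which is how
# checkpoints on the Hub carry these hyperparameters:
def _example_config_roundtrip():
    config = EfficientFormerConfig(num_meta3d_blocks=2)
    restored = EfficientFormerConfig.from_dict(config.to_dict())
    assert restored.num_meta3d_blocks == 2
    return restored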
| 340 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''albert-base-v1''': 5_1_2,
'''albert-large-v1''': 5_1_2,
'''albert-xlarge-v1''': 5_1_2,
'''albert-xxlarge-v1''': 5_1_2,
'''albert-base-v2''': 5_1_2,
'''albert-large-v2''': 5_1_2,
'''albert-xlarge-v2''': 5_1_2,
'''albert-xxlarge-v2''': 5_1_2,
}
SPIECE_UNDERLINE = "▁"
class AlbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it and
        # is included in the raw text, so there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequences. An ALBERT sequence has the following format:

        - single sequence: `[CLS] X [SEP]`
        - pair of sequences: `[CLS] A [SEP] B [SEP]`
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
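
# --- Illustrative only: the two methods above implement ALBERT's
# `[CLS] A [SEP]` / `[CLS] A [SEP] B [SEP]` layout. A small demonstration on
# raw token ids (downloads the pretrained tokenizer files on first use):
def _example_albert_pair_encoding():
    tok = AlbertTokenizerFast.from_pretrained("albert-base-v2")
    ids = tok.build_inputs_with_special_tokens([10, 11], [20, 21])
    # -> [cls_id, 10, 11, sep_id, 20, 21, sep_id]
    assert ids[0] == tok.cls_token_id and ids[3] == ids[-1] == tok.sep_token_id
    return ids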
| 274 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''studio-ousia/luke-base''': '''https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json''',
'''studio-ousia/luke-large''': '''https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json''',
}
class LukeConfig(PretrainedConfig):
    model_type = "luke"

    def __init__(
        self,
        vocab_size=50267,
        entity_vocab_size=500000,
        hidden_size=768,
        entity_emb_size=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        """Constructs LukeConfig."""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
| 357 |
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    """Extract start/end timestamps and duration (in minutes) from a single workflow job."""
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)

    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info


def get_job_time(workflow_run_id, token=None):
    """Extract time info for all jobs in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(f'{k}: {v["duration"]}')
| 276 | 0 |
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    # overwrite from common since ViTMAEForPreTraining applies random masking; fix the noise
    # to generate reproducible masks during the PT/TF equivalence test
    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
        # make masks reproducible
        np.random.seed(2)

        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict["noise"] = pt_noise

        super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)

    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results."
    )
    def test_determinism(self):
        pass

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results."
    )
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results."
    )
    def test_save_load_fast_init_to_base(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))

        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )

        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
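
# --- Illustrative only: the expected sequence length used throughout the
# tester above, spelled out for its default settings (image_size=30,
# patch_size=2, mask_ratio=0.6):
def _example_vitmae_seq_length():
    num_patches = (30 // 2) ** 2  # 225 patches
    # keep (1 - mask_ratio) of (patches + CLS), rounded up
    seq_length = int(math.ceil((1 - 0.6) * (num_patches + 1)))
    assert seq_length == 91
    return seq_length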
 | 161 |
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F'''{bindir}/../../examples/pytorch/translation'''):
from run_translation import main # noqa
set_seed(42)
_SCREAMING_SNAKE_CASE = 'sshleifer/student_marian_en_ro_6_1'
_SCREAMING_SNAKE_CASE = 'sshleifer/tiny-mbart'
@require_torch
class TestTrainerExt(TestCasePlus):
    def run_seq2seq_quick(
        self, distributed=False, extra_args_str=None, predict_with_generate=True,
        do_train=True, do_eval=True, do_predict=True,
    ):
        output_dir = self.run_trainer(
            eval_steps=1, max_len=12, model_name=MBART_TINY, num_train_epochs=1, distributed=distributed,
            extra_args_str=extra_args_str, predict_with_generate=predict_with_generate,
            do_train=do_train, do_eval=do_eval, do_predict=do_predict,
        )
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history

        if not do_eval:
            return

        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]
        first_step_stats = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats
            last_step_stats = eval_metrics[-1]
            assert isinstance(last_step_stats["eval_bleu"], float)
            assert not math.isnan(float(last_step_stats["eval_loss"])), "eval_loss must not be `nan`"
    @require_torch_non_multi_gpu
    def test_run_seq2seq_no_dist(self):
        self.run_seq2seq_quick()

    # verify that the trainer can handle non-distributed with n_gpu > 1
    @require_torch_multi_gpu
    def test_run_seq2seq_dp(self):
        self.run_seq2seq_quick(distributed=False)

    # verify that the trainer can handle distributed with n_gpu > 1
    @require_torch_multi_gpu
    def test_run_seq2seq_ddp(self):
        self.run_seq2seq_quick(distributed=True)
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def UpperCAmelCase ( self ) -> str:
self.run_seqaseq_quick(distributed=lowerCAmelCase_ , extra_args_str="""--sharded_ddp simple""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def UpperCAmelCase ( self ) -> Dict:
self.run_seqaseq_quick(distributed=lowerCAmelCase_ , extra_args_str="""--sharded_ddp simple --fp16""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def UpperCAmelCase ( self ) -> Optional[Any]:
self.run_seqaseq_quick(distributed=lowerCAmelCase_ , extra_args_str="""--sharded_ddp zero_dp_2""" , predict_with_generate=lowerCAmelCase_ )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def UpperCAmelCase ( self ) -> Tuple:
self.run_seqaseq_quick(
distributed=lowerCAmelCase_ , extra_args_str="""--sharded_ddp zero_dp_2 --fp16""" , predict_with_generate=lowerCAmelCase_ )
    @require_apex
    @require_torch_gpu
    def test_run_seq2seq_apex(self):
        # XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
        # program and it breaks other tests that run from the same pytest worker, therefore until this is
        # sorted out it must be run only in an external program, that is distributed=True in this
        # test and only under one or more gpus - if we want cpu will need to make a special test
        #
        # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
        # 2nd main() call it botches the future eval.
        #
        self.run_seq2seq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")
        # test 2nd time - was getting eval_loss': nan'
        # to reproduce the problem set distributed=False
        self.run_seq2seq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")
    @parameterized.expand(["base", "low", "high", "mixed"])
    @require_torch_multi_gpu
    def test_trainer_log_level_replica(self, experiment_id):
        # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
        experiments = {
            # test with the default log_level - should be info and thus log info once
            "base": {"extra_args_str": "", "n_matches": 1},
            # test with low log_level and log_level_replica - should be noisy on all processes
            # now the info string should appear twice on 2 processes
            "low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2},
            # test with high log_level and low log_level_replica
            # now the info string should appear once only on the replica
            "high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1},
            # test with high log_level and log_level_replica - should be quiet on all processes
            "mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0},
        }

        data = experiments[experiment_id]
        kwargs = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False}
        log_info_string = "Running training"
        with CaptureStderr() as cl:
            self.run_seq2seq_quick(**kwargs, extra_args_str=data["extra_args_str"])
        n_matches = len(re.findall(log_info_string, cl.err))
        self.assertEqual(n_matches, data["n_matches"])
    @slow
    def test_run_seq2seq(self):
        output_dir = self.run_trainer(
            eval_steps=2, max_len=128, model_name=MARIAN_MODEL, learning_rate=3e-4, num_train_epochs=10,
            distributed=False,
        )

        # Check metrics
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history
        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]
        first_step_stats = eval_metrics[0]
        last_step_stats = eval_metrics[-1]
        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats["eval_bleu"], float)

        # test if do_predict saves generations and metrics
        contents = os.listdir(output_dir)
        contents = {os.path.basename(p) for p in contents}
        assert "generated_predictions.txt" in contents
        assert "predict_results.json" in contents
    @slow
    @require_bitsandbytes
    def test_run_seq2seq_bnb(self):
        from transformers.training_args import OptimizerNames

        def train_and_return_metrics(optim: str) -> Tuple[int, float]:
            extra_args = "--skip_memory_metrics 0"

            output_dir = self.run_trainer(
                max_len=128, model_name=MARIAN_MODEL, learning_rate=3e-4, num_train_epochs=1,
                optim=optim, distributed=True, extra_args_str=extra_args,
                do_eval=False, do_predict=False, n_gpus_to_use=1,
            )

            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir, "trainer_state.json")).log_history
            gpu_peak_mem_mb = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20)
            gpu_alloc_mem_mb = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20)
            loss = logs[0]["train_loss"]
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss

        gpu_peak_mem_orig, gpu_alloc_mem_orig, loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value)
        gpu_peak_mem_bnb, gpu_alloc_mem_bnb, loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value)

        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb

        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb
        # sshleifer/student_marian_en_ro_6_1 has 54M parameters, 29M of which is `nn.Embedding` which
        # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
        # in 2 bytes and the diff in optim memory usage is derived as so:
        #
        # - normal 25*8=~200MB (8 bytes per param)
        # - bnb    25*2= ~50MB (2 bytes per param)
        #
        # Thus we should expect ~150MB total memory saved.
        #
        # Peak memory should be the same - the total should be different by about that same margin
        #
        # After leaving a small margin to accommodate for differences between gpus let's check
        # that we have at least 120MB in savings
        expected_savings = 120

        # uncomment the following if this test starts failing - requires py38 for a new print feature
        # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
        # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
        # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
        # print(f"{gpu_alloc_mem_diff=}MB")
        # print(f"{gpu_peak_mem_diff=}MB")
        # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
        # print(f"{gpu_total_mem_diff=}MB, {gpu_peak_mem_diff=}MB")
        self.assertGreater(
            gpu_alloc_mem_diff,
            expected_savings,
            "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
            f" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB",
        )
        self.assertGreater(
            gpu_total_mem_diff,
            expected_savings,
            "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
            f" gpu_total_mem_bnb={gpu_total_mem_bnb}MB",
        )
        self.assertEqual(
            loss_orig, loss_bnb, f"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}"
        )
    def run_trainer(
        self,
        max_len: int,
        model_name: str,
        num_train_epochs: int,
        learning_rate: float = 3e-3,
        optim: str = "adafactor",
        distributed: bool = False,
        extra_args_str: str = None,
        eval_steps: int = 0,
        predict_with_generate: bool = True,
        do_train: bool = True,
        do_eval: bool = True,
        do_predict: bool = True,
        n_gpus_to_use: int = None,
    ) -> str:
        data_dir = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
        output_dir = self.get_auto_remove_tmp_dir()
        args_train = f"""
            --model_name_or_path {model_name}
            --train_file {data_dir}/train.json
            --validation_file {data_dir}/val.json
            --test_file {data_dir}/test.json
            --output_dir {output_dir}
            --overwrite_output_dir
            --max_train_samples 8
            --max_source_length {max_len}
            --max_target_length {max_len}
            --do_train
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 4
            --learning_rate {learning_rate}
            --warmup_steps 8
            --logging_steps 0
            --logging_strategy no
            --save_steps {str(eval_steps)}
            --group_by_length
            --label_smoothing_factor 0.1
            --target_lang ro_RO
            --source_lang en_XX
        """.split()

        args_eval = f"""
            --do_eval
            --per_device_eval_batch_size 4
            --max_eval_samples 8
            --val_max_target_length {max_len}
            --evaluation_strategy steps
            --eval_steps {str(eval_steps)}
        """.split()

        args_predict = """
            --do_predict
        """.split()
        args = []
        if do_train:
            args += args_train

        if do_eval:
            args += args_eval

        if do_predict:
            args += args_predict

        if predict_with_generate:
            args += "--predict_with_generate".split()

        if do_train:
            if optim == "adafactor":
                args += "--adafactor".split()
            else:
                args += f"--optim {optim}".split()

        if extra_args_str is not None:
            args += extra_args_str.split()

        if distributed:
            if n_gpus_to_use is None:
                n_gpus_to_use = get_gpu_count()
            master_port = get_torch_dist_unique_port()
            distributed_args = f"""
                -m torch.distributed.run
                --nproc_per_node={n_gpus_to_use}
                --master_port={master_port}
                {self.examples_dir_str}/pytorch/translation/run_translation.py
            """.split()
            cmd = [sys.executable] + distributed_args + args
            # keep for quick debug
            # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
            execute_subprocess_async(cmd, env=self.get_env())
        else:
            testargs = ["run_translation.py"] + args
            with patch.object(sys, "argv", testargs):
                main()

        return output_dir
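
# A quick back-of-the-envelope check of the ~150MB figure used in test_run_seq2seq_bnb
# above (a sketch for this note, assuming Adam keeps ~8 bytes of optimizer state per
# fp32 parameter while 8-bit bnb Adam keeps ~2 bytes):
def expected_bnb_saving_mb(quantized_params: int = 25_000_000) -> float:
    bytes_saved_per_param = 8 - 2  # fp32 Adam state vs 8-bit Adam state
    return quantized_params * bytes_saved_per_param / 2**20


# 25M params * 6 bytes ≈ 143 MiB, which is why the test asserts a saving of at least
# 120MB rather than the full theoretical ~150MB.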
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class WhisperFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features"]

    def __init__(
        self, feature_size=80, sampling_rate=16_000, hop_length=160, chunk_length=30, n_fft=400,
        padding_value=0.0, return_attention_mask=False, **kwargs,
    ):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value,
            return_attention_mask=return_attention_mask, **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2, num_mel_filters=feature_size, min_frequency=0.0,
            max_frequency=8_000.0, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney",
        )
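        # With the defaults above: 1 + 400 // 2 = 201 linear-frequency bins are mapped onto
        # 80 mel bins; one 30 s chunk is 30 * 16_000 = 480_000 samples, giving
        # 480_000 // 160 = 3_000 mel frames per chunk.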
    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        log_spec = spectrogram(
            waveform, window_function(self.n_fft, "hann"), frame_length=self.n_fft,
            hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters, log_mel="log10",
        )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec
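
    # Note on _np_extract_fbank_features above: `log_spec.max() - 8.0` caps the dynamic
    # range of the log10 power spectrogram at 8 log10 units (80 dB), and
    # `(log_spec + 4.0) / 4.0` rescales the clipped values into roughly [-1, 1].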
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        """
        Every array in the list is normalized to have zero mean and unit variance
        """
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
        return normed_input_values
    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: bool = True,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        padding: Optional[str] = "max_length",
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        do_normalize: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        batched_speech = BatchFeature({"input_features": raw_speech})

        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length if max_length else self.n_samples,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )

        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"],
                attention_mask=padded_inputs["attention_mask"],
                padding_value=self.padding_value,
            )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)

        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)

        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]

        if isinstance(input_features[0], List):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features

        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
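
# A minimal usage sketch of the extractor above (illustrative; assumes the defaults
# shown in __init__: 16 kHz mono input, 30 s chunks padded with silence, 80 mel bins):
if __name__ == "__main__":
    feature_extractor = WhisperFeatureExtractor()
    waveform = np.zeros(16_000, dtype=np.float32)  # 1 s of silence at 16 kHz
    inputs = feature_extractor(waveform, sampling_rate=16_000, return_tensors="np")
    # padding="max_length" pads to 480_000 samples -> (batch, n_mels, frames) = (1, 80, 3000)
    print(inputs["input_features"].shape)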
def solution(n: int = 1_000) -> int:
    """
    Project Euler 57: count how many of the first `n` expansions of the continued
    fraction for sqrt(2) have a numerator with more digits than the denominator.
    """
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result)
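
# Sanity check of the recurrence used in solution() (this derivation is a note added
# here, not part of the original): each expansion maps p/q to
# 1 + 1/(1 + p/q) = (p + 2q)/(p + q), so starting from 1/1 the expansions run
# 3/2, 7/5, 17/12, 41/29, ... The 8th, 1393/985, is the first whose numerator has
# more digits than its denominator.
def _first_expansions(k: int = 8) -> list:
    p, q, expansions = 1, 1, []
    for _ in range(k):
        p, q = p + 2 * q, p + q
        expansions.append((p, q))
    return expansions


assert _first_expansions()[-1] == (1393, 985)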
if __name__ == "__main__":
    print(f"{solution() = }")