code | code_codestyle | style_context | style_context_codestyle | label
---|---|---|---|---
stringlengths 82-54.1k | int64 0-699 | stringlengths 111-35.6k | int64 0-699 | int64 0-1
"""simple docstring"""
import random
from typing import Any
def fisher_yates_shuffle(data):
    """Shuffle ``data`` in place by repeatedly swapping two randomly chosen positions."""
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2]
lowerCamelCase_ = True if '''large''' in model_name or '''huge''' in model_name else False
lowerCamelCase_ = True if '''large''' in model_name or '''huge''' in model_name else False
lowerCamelCase_ = True if '''large''' in model_name or '''huge''' in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
lowerCamelCase_ = [3, 3, 3, 3]
lowerCamelCase_ = [5, 5, 5, 5]
elif "fl4" in model_name:
lowerCamelCase_ = [4, 4, 4, 4]
lowerCamelCase_ = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
lowerCamelCase_ = [3, 3, 3, 3]
if "lrf" in model_name:
lowerCamelCase_ = [3, 3, 3, 3]
else:
lowerCamelCase_ = [2, 2, 2, 2]
if "tiny" in model_name:
lowerCamelCase_ = 96
elif "small" in model_name:
lowerCamelCase_ = 96
elif "base" in model_name:
lowerCamelCase_ = 128
elif "large" in model_name:
lowerCamelCase_ = 192
elif "xlarge" in model_name:
lowerCamelCase_ = 256
elif "huge" in model_name:
lowerCamelCase_ = 352
# set label information
lowerCamelCase_ = '''huggingface/label-files'''
if "large" in model_name or "huge" in model_name:
lowerCamelCase_ = '''imagenet-22k-id2label.json'''
else:
lowerCamelCase_ = '''imagenet-1k-id2label.json'''
lowerCamelCase_ = json.load(open(hf_hub_download(lowerCAmelCase__ ,lowerCAmelCase__ ,repo_type='''dataset''' ) ,'''r''' ) )
lowerCamelCase_ = {int(lowerCAmelCase__ ): v for k, v in idalabel.items()}
lowerCamelCase_ = {v: k for k, v in idalabel.items()}
lowerCamelCase_ = FocalNetConfig(
embed_dim=lowerCAmelCase__ ,depths=lowerCAmelCase__ ,focal_levels=lowerCAmelCase__ ,focal_windows=lowerCAmelCase__ ,use_conv_embed=lowerCAmelCase__ ,idalabel=lowerCAmelCase__ ,labelaid=lowerCAmelCase__ ,use_post_layernorm=lowerCAmelCase__ ,use_layerscale=lowerCAmelCase__ ,)
return config
def lowercase ( lowerCAmelCase__ ):
if "patch_embed.proj" in name:
lowerCamelCase_ = name.replace('''patch_embed.proj''' ,'''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
lowerCamelCase_ = name.replace('''patch_embed.norm''' ,'''embeddings.norm''' )
if "layers" in name:
lowerCamelCase_ = '''encoder.''' + name
if "encoder.layers" in name:
lowerCamelCase_ = name.replace('''encoder.layers''' ,'''encoder.stages''' )
if "downsample.proj" in name:
lowerCamelCase_ = name.replace('''downsample.proj''' ,'''downsample.projection''' )
if "blocks" in name:
lowerCamelCase_ = name.replace('''blocks''' ,'''layers''' )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
lowerCamelCase_ = name.replace('''modulation.f''' ,'''modulation.projection_in''' )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
lowerCamelCase_ = name.replace('''modulation.h''' ,'''modulation.projection_context''' )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
lowerCamelCase_ = name.replace('''modulation.proj''' ,'''modulation.projection_out''' )
if name == "norm.weight":
lowerCamelCase_ = '''layernorm.weight'''
if name == "norm.bias":
lowerCamelCase_ = '''layernorm.bias'''
if "head" in name:
lowerCamelCase_ = name.replace('''head''' ,'''classifier''' )
else:
lowerCamelCase_ = '''focalnet.''' + name
return name
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=False ):
# fmt: off
lowerCamelCase_ = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
lowerCamelCase_ = model_name_to_url[model_name]
print('''Checkpoint URL: ''' ,lowerCAmelCase__ )
lowerCamelCase_ = torch.hub.load_state_dict_from_url(lowerCAmelCase__ ,map_location='''cpu''' )['''model''']
# rename keys
for key in state_dict.copy().keys():
lowerCamelCase_ = state_dict.pop(lowerCAmelCase__ )
lowerCamelCase_ = val
lowerCamelCase_ = get_focalnet_config(lowerCAmelCase__ )
lowerCamelCase_ = FocalNetForImageClassification(lowerCAmelCase__ )
model.eval()
# load state dict
model.load_state_dict(lowerCAmelCase__ )
# verify conversion
lowerCamelCase_ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCamelCase_ = BitImageProcessor(
do_resize=lowerCAmelCase__ ,size={'''shortest_edge''': 256} ,resample=PILImageResampling.BILINEAR ,do_center_crop=lowerCAmelCase__ ,crop_size=224 ,do_normalize=lowerCAmelCase__ ,image_mean=lowerCAmelCase__ ,image_std=lowerCAmelCase__ ,)
lowerCamelCase_ = Image.open(requests.get(lowerCAmelCase__ ,stream=lowerCAmelCase__ ).raw )
lowerCamelCase_ = processor(images=lowerCAmelCase__ ,return_tensors='''pt''' )
lowerCamelCase_ = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406] ,std=[0.229, 0.224, 0.225] ),
] )
lowerCamelCase_ = image_transforms(lowerCAmelCase__ ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values ,lowerCAmelCase__ ,atol=1E-4 )
lowerCamelCase_ = model(**lowerCAmelCase__ )
lowerCamelCase_ = outputs.logits.argmax(-1 ).item()
print('''Predicted class:''' ,model.config.idalabel[predicted_class_idx] )
print('''First values of logits:''' ,outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
lowerCamelCase_ = torch.tensor([0.2_166, -0.4_368, 0.2_191] )
elif model_name == "focalnet-tiny-lrf":
lowerCamelCase_ = torch.tensor([1.1_669, 0.0_125, -0.1_695] )
elif model_name == "focalnet-small":
lowerCamelCase_ = torch.tensor([0.4_917, -0.0_430, 0.1_341] )
elif model_name == "focalnet-small-lrf":
lowerCamelCase_ = torch.tensor([-0.2_588, -0.5_342, -0.2_331] )
elif model_name == "focalnet-base":
lowerCamelCase_ = torch.tensor([-0.1_655, -0.4_090, -0.1_730] )
elif model_name == "focalnet-base-lrf":
lowerCamelCase_ = torch.tensor([0.5_306, -0.0_483, -0.3_928] )
assert torch.allclose(outputs.logits[0, :3] ,lowerCAmelCase__ ,atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(lowerCAmelCase__ )
processor.save_pretrained(lowerCAmelCase__ )
if push_to_hub:
print(f"Pushing model and processor of {model_name} to the hub..." )
model.push_to_hub(f"{model_name}" )
processor.push_to_hub(f"{model_name}" )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""focalnet-tiny""",
type=str,
help="""Name of the FocalNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub.""",
)
A_ = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
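# The conversion script above follows a common checkpoint-porting recipe: download the
# original state dict, rename every key to the new module layout, load it into the
# target model, then sanity-check a few logits. A minimal, generic sketch of the
# rename-and-load step (the helper name is illustrative, not part of the script):
def port_state_dict(old_state_dict, rename_key):
    """Return a copy of the state dict with every key passed through ``rename_key``."""
    return {rename_key(key): value for key, value in old_state_dict.items()}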
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
_UpperCamelCase = logging.get_logger(__name__)
@dataclass
class __a ( __magic_name__ ):
"""simple docstring"""
__UpperCamelCase : Dict = [
'no_inference',
'no_cuda',
'no_tpu',
'no_speed',
'no_memory',
'no_env_print',
'no_multi_process',
]
def __init__( self , **snake_case ):
"""simple docstring"""
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
lowerCAmelCase__ : Any = deprecated_arg[3:]
setattr(self , snake_case , not kwargs.pop(snake_case ) )
logger.warning(
F"""{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"""
F""" {positive_arg}={kwargs[positive_arg]}""" )
lowerCAmelCase__ : List[Any] = kwargs.pop("torchscript" , self.torchscript )
lowerCAmelCase__ : List[Any] = kwargs.pop("torch_xla_tpu_print_metrics" , self.torch_xla_tpu_print_metrics )
lowerCAmelCase__ : Any = kwargs.pop("fp16_opt_level" , self.fpaa_opt_level )
super().__init__(**snake_case )
__UpperCamelCase : bool = field(default=__magic_name__ , metadata={'help': 'Trace the models using torchscript'} )
__UpperCamelCase : bool = field(default=__magic_name__ , metadata={'help': 'Print Xla/PyTorch tpu metrics'} )
__UpperCamelCase : str = field(
default='O1' , metadata={
'help': (
'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. '
'See details at https://nvidia.github.io/apex/amp.html'
)
} , )
@cached_property
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
requires_backends(self , ["torch"] )
logger.info("PyTorch: setting up devices" )
if not self.cuda:
lowerCAmelCase__ : int = torch.device("cpu" )
lowerCAmelCase__ : Union[str, Any] = 0
elif is_torch_tpu_available():
lowerCAmelCase__ : str = xm.xla_device()
lowerCAmelCase__ : int = 0
else:
lowerCAmelCase__ : int = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
lowerCAmelCase__ : List[str] = torch.cuda.device_count()
return device, n_gpu
@property
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
return is_torch_tpu_available() and self.tpu
@property
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
requires_backends(self , ["torch"] )
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
requires_backends(self , ["torch"] )
return self._setup_devices[0]
@property
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
requires_backends(self , ["torch"] )
return self._setup_devices[1]
@property
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
return self.n_gpu > 0
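# The __init__ above maps each deprecated "no_*" flag onto its positive counterpart
# (for example, no_cuda=True becomes cuda=False) before handing the remaining kwargs
# to the parent dataclass. A minimal standalone sketch of that renaming pattern
# (function and argument names are hypothetical):
def flip_deprecated_flags(kwargs, deprecated=("no_inference", "no_cuda", "no_tpu")):
    for arg in deprecated:
        if arg in kwargs:
            kwargs[arg[3:]] = not kwargs.pop(arg)  # strip the "no_" prefix and negate
    return kwargs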
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __lowerCamelCase ( lowerCAmelCase , unittest.TestCase ):
a__: Tuple = RoCBertTokenizer
a__: int = None
a__: Optional[Any] = False
a__: Optional[int] = True
a__: Tuple = filter_non_english
def UpperCAmelCase__ ( self ):
super().setUp()
lowerCamelCase_ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''你''', '''好''', '''是''', '''谁''', '''a''', '''b''', '''c''', '''d''']
lowerCamelCase_ = {}
lowerCamelCase_ = {}
for i, value in enumerate(UpperCAmelCase ):
lowerCamelCase_ = i
lowerCamelCase_ = i
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_shape_file'''] )
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_pronunciation_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
with open(self.word_shape_file , '''w''' , encoding='''utf-8''' ) as word_shape_writer:
json.dump(UpperCAmelCase , UpperCAmelCase , ensure_ascii=UpperCAmelCase )
with open(self.word_pronunciation_file , '''w''' , encoding='''utf-8''' ) as word_pronunciation_writer:
json.dump(UpperCAmelCase , UpperCAmelCase , ensure_ascii=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
lowerCamelCase_ = tokenizer.tokenize('''你好[SEP]你是谁''' )
self.assertListEqual(UpperCAmelCase , ['''你''', '''好''', '''[SEP]''', '''你''', '''是''', '''谁'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(UpperCAmelCase ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(UpperCAmelCase ) , [5, 6, 2, 5, 7, 8] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
lowerCamelCase_ = {}
for i, token in enumerate(UpperCAmelCase ):
lowerCamelCase_ = i
lowerCamelCase_ = RoCBertWordpieceTokenizer(vocab=UpperCAmelCase , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def UpperCAmelCase__ ( self ):
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def UpperCAmelCase__ ( self ):
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def UpperCAmelCase__ ( self ):
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(UpperCAmelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
if self.test_rust_tokenizer:
lowerCamelCase_ = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(UpperCAmelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
def UpperCAmelCase__ ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
lowerCamelCase_ = tokenizer_r.encode_plus(
UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , add_special_tokens=UpperCAmelCase , )
lowerCamelCase_ = tokenizer_r.do_lower_case if hasattr(UpperCAmelCase , '''do_lower_case''' ) else False
lowerCamelCase_ = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = ['''的''', '''人''', '''有''']
lowerCamelCase_ = ''''''.join(UpperCAmelCase )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCamelCase_ = True
lowerCamelCase_ = self.tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = tokenizer_p.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer_r.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer_r.convert_ids_to_tokens(UpperCAmelCase )
lowerCamelCase_ = tokenizer_p.convert_ids_to_tokens(UpperCAmelCase )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = False
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = self.tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = tokenizer_r.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer_p.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer_r.convert_ids_to_tokens(UpperCAmelCase )
lowerCamelCase_ = tokenizer_p.convert_ids_to_tokens(UpperCAmelCase )
# it is expected that only the first Chinese character is not preceded by "##".
lowerCamelCase_ = [
f"##{token}" if idx != 0 else token for idx, token in enumerate(UpperCAmelCase )
]
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
@slow
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
lowerCamelCase_ = tokenizer.encode('''你好''' , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer.encode('''你是谁''' , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase )
lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase , UpperCAmelCase )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_tokenizers(do_lower_case=UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
lowerCamelCase_ = '''你好,你是谁'''
lowerCamelCase_ = tokenizer.tokenize(UpperCAmelCase )
lowerCamelCase_ = tokenizer.convert_tokens_to_ids(UpperCAmelCase )
lowerCamelCase_ = tokenizer.convert_tokens_to_shape_ids(UpperCAmelCase )
lowerCamelCase_ = tokenizer.convert_tokens_to_pronunciation_ids(UpperCAmelCase )
lowerCamelCase_ = tokenizer.prepare_for_model(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer.encode_plus(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
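# The RoCBertWordpieceTokenizer test above expects greedy longest-match-first splitting
# ("unwanted" -> "un", "##want", "##ed"; out-of-vocabulary words collapse to [UNK]).
# A minimal sketch of that algorithm for a single word, independent of the class under
# test (the function name is illustrative):
def wordpiece(word, vocab, unk="[UNK]"):
    pieces, start = [], 0
    while start < len(word):
        end, current = len(word), None
        while start < end:
            piece = word[start:end] if start == 0 else "##" + word[start:end]
            if piece in vocab:
                current = piece
                break
            end -= 1
        if current is None:
            return [unk]  # no piece matched: the whole word becomes the unknown token
        pieces.append(current)
        start = end
    return pieces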
from math import sqrt
def _SCREAMING_SNAKE_CASE ( snake_case ) -> int:
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and (
number >= 0
), "'number' must been an int and positive"
_UpperCAmelCase = True
# 0 and 1 are none primes.
if number <= 1:
_UpperCAmelCase = False
for divisor in range(2 , int(round(sqrt(lowerCAmelCase__ ) ) ) + 1 ):
# if 'number' divisible by 'divisor' then sets 'status'
# of false and break up the loop.
if number % divisor == 0:
_UpperCAmelCase = False
break
# precondition
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ), "'status' must been from type bool"
return status
def _SCREAMING_SNAKE_CASE ( snake_case ) -> Optional[int]:
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
_UpperCAmelCase = list(range(2 , n + 1 ) )
_UpperCAmelCase = [] # this list will be returns.
# actual sieve of erathostenes
for i in range(len(lowerCAmelCase__ ) ):
for j in range(i + 1 , len(lowerCAmelCase__ ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
_UpperCAmelCase = 0
# filters actual prime numbers.
_UpperCAmelCase = [x for x in begin_list if x != 0]
# precondition
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ), "'ans' must been from type list"
return ans
def _SCREAMING_SNAKE_CASE ( snake_case ) -> Tuple:
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and (n > 2), "'N' must been an int and > 2"
_UpperCAmelCase = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2 , n + 1 ):
if is_prime(lowerCAmelCase__ ):
ans.append(lowerCAmelCase__ )
# precondition
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ), "'ans' must been from type list"
return ans
def _SCREAMING_SNAKE_CASE ( snake_case ) -> Optional[Any]:
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and number >= 0, "'number' must been an int and >= 0"
_UpperCAmelCase = [] # this list will be returns of the function.
# potential prime number factors.
_UpperCAmelCase = 2
_UpperCAmelCase = number
if number == 0 or number == 1:
ans.append(lowerCAmelCase__ )
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(lowerCAmelCase__ ):
while quotient != 1:
if is_prime(lowerCAmelCase__ ) and (quotient % factor == 0):
ans.append(lowerCAmelCase__ )
quotient /= factor
else:
factor += 1
else:
ans.append(lowerCAmelCase__ )
# precondition
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ), "'ans' must been from type list"
return ans
def _SCREAMING_SNAKE_CASE ( snake_case ) -> Dict:
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and (
number >= 0
), "'number' bust been an int and >= 0"
_UpperCAmelCase = 0
# prime factorization of 'number'
_UpperCAmelCase = prime_factorization(lowerCAmelCase__ )
_UpperCAmelCase = max(lowerCAmelCase__ )
# precondition
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ), "'ans' must been from type int"
return ans
def _SCREAMING_SNAKE_CASE ( snake_case ) -> Dict:
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and (
number >= 0
), "'number' bust been an int and >= 0"
_UpperCAmelCase = 0
# prime factorization of 'number'
_UpperCAmelCase = prime_factorization(lowerCAmelCase__ )
_UpperCAmelCase = min(lowerCAmelCase__ )
# precondition
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ), "'ans' must been from type int"
return ans
def _SCREAMING_SNAKE_CASE ( snake_case ) -> int:
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ), "'number' must been an int"
assert isinstance(number % 2 == 0 , lowerCAmelCase__ ), "compare bust been from type bool"
return number % 2 == 0
def _SCREAMING_SNAKE_CASE ( snake_case ) -> str:
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ), "'number' must been an int"
assert isinstance(number % 2 != 0 , lowerCAmelCase__ ), "compare bust been from type bool"
return number % 2 != 0
def _SCREAMING_SNAKE_CASE ( snake_case ) -> Optional[Any]:
assert (
isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and (number > 2) and is_even(lowerCAmelCase__ )
), "'number' must been an int, even and > 2"
_UpperCAmelCase = [] # this list will returned
# creates a list of prime numbers between 2 up to 'number'
_UpperCAmelCase = get_prime_numbers(lowerCAmelCase__ )
_UpperCAmelCase = len(lowerCAmelCase__ )
# run variable for while-loops.
_UpperCAmelCase = 0
_UpperCAmelCase = None
# exit variable. for break up the loops
_UpperCAmelCase = True
while i < len_pn and loop:
_UpperCAmelCase = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
_UpperCAmelCase = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
and (len(lowerCAmelCase__ ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
def _SCREAMING_SNAKE_CASE ( snake_case , snake_case ) -> Tuple:
assert (
isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
and isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
_UpperCAmelCase = 0
while numbera != 0:
_UpperCAmelCase = numbera % numbera
_UpperCAmelCase = numbera
_UpperCAmelCase = rest
# precondition
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
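# The function above is the iterative Euclidean algorithm, but the renamed identifiers
# collapse both arguments into the same name. A minimal working sketch of the algorithm
# it describes:
def euclidean_gcd(a, b):
    """Greatest common divisor of two non-negative integers."""
    while b != 0:
        a, b = b, a % b
    return a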
def _SCREAMING_SNAKE_CASE ( snake_case , snake_case ) -> Any:
assert (
isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
and isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
_UpperCAmelCase = 1 # actual answer that will be return.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
_UpperCAmelCase = prime_factorization(lowerCAmelCase__ )
_UpperCAmelCase = prime_factorization(lowerCAmelCase__ )
elif numbera == 1 or numbera == 1:
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = max(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCAmelCase = 0
_UpperCAmelCase = 0
_UpperCAmelCase = [] # captured numbers int both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
_UpperCAmelCase = prime_fac_a.count(lowerCAmelCase__ )
_UpperCAmelCase = prime_fac_a.count(lowerCAmelCase__ )
for _ in range(max(lowerCAmelCase__ , lowerCAmelCase__ ) ):
ans *= n
else:
_UpperCAmelCase = prime_fac_a.count(lowerCAmelCase__ )
for _ in range(lowerCAmelCase__ ):
ans *= n
done.append(lowerCAmelCase__ )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
_UpperCAmelCase = prime_fac_a.count(lowerCAmelCase__ )
for _ in range(lowerCAmelCase__ ):
ans *= n
done.append(lowerCAmelCase__ )
# precondition
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def _SCREAMING_SNAKE_CASE ( snake_case ) -> Dict:
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and (n >= 0), "'number' must been a positive int"
_UpperCAmelCase = 0
_UpperCAmelCase = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(lowerCAmelCase__ ):
ans += 1
# precondition
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and is_prime(
lowerCAmelCase__ ), "'ans' must been a prime number and from type int"
return ans
def _SCREAMING_SNAKE_CASE ( snake_case , snake_case ) -> Optional[int]:
assert (
is_prime(lowerCAmelCase__ ) and is_prime(lowerCAmelCase__ ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
_UpperCAmelCase = p_number_a + 1 # jump to the next number
_UpperCAmelCase = [] # this list will be returns.
# if number is not prime then
# fetch the next prime number.
while not is_prime(lowerCAmelCase__ ):
number += 1
while number < p_number_a:
ans.append(lowerCAmelCase__ )
number += 1
# fetch the next prime number.
while not is_prime(lowerCAmelCase__ ):
number += 1
# precondition
assert (
isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
and ans[0] != p_number_a
and ans[len(lowerCAmelCase__ ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def _SCREAMING_SNAKE_CASE ( snake_case ) -> Union[str, Any]:
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and (n >= 1), "'n' must been int and >= 1"
_UpperCAmelCase = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(lowerCAmelCase__ )
# precondition
assert ans[0] == 1 and ans[len(lowerCAmelCase__ ) - 1] == n, "Error in function getDivisiors(...)"
return ans
def _SCREAMING_SNAKE_CASE ( snake_case ) -> Optional[Any]:
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and (
number > 1
), "'number' must been an int and >= 1"
_UpperCAmelCase = get_divisors(lowerCAmelCase__ )
# precondition
assert (
isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
and (divisors[0] == 1)
and (divisors[len(lowerCAmelCase__ ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def _SCREAMING_SNAKE_CASE ( snake_case , snake_case ) -> Any:
assert (
isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
and isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
_UpperCAmelCase = gcd(abs(lowerCAmelCase__ ) , abs(lowerCAmelCase__ ) )
# precondition
assert (
isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def _SCREAMING_SNAKE_CASE ( snake_case ) -> List[str]:
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and (n >= 0), "'n' must been a int and >= 0"
_UpperCAmelCase = 1 # this will be return.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def _SCREAMING_SNAKE_CASE ( snake_case ) -> Optional[Any]:
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and (n >= 0), "'n' must been an int and >= 0"
_UpperCAmelCase = 0
_UpperCAmelCase = 1
_UpperCAmelCase = 1 # this will be return
for _ in range(n - 1 ):
_UpperCAmelCase = ans
ans += fiba
_UpperCAmelCase = tmp
    return ans
"""simple docstring"""
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
A_ = datasets.logging.get_logger(__name__)
A_ = """\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",
author = \"Moosavi, Nafise Sadat and
Strube, Michael\",
booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",
month = aug,
year = \"2016\",
address = \"Berlin, Germany\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/P16-1060\",
doi = \"10.18653/v1/P16-1060\",
pages = \"632--642\",
}
"""
A_ = """\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only work with CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identifies the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
"""
A_ = """
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting 'keep_singletons=False', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
'mentions': mentions
'muc': MUC metric [Vilain et al, 1995]
'bcub': B-cubed [Bagga and Baldwin, 1998]
'ceafe': CEAFe [Luo et al., 2005]
'lea': LEA [Moosavi and Strube, 2016]
'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric('coval')
>>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',
... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',
... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',
... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',
... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',
... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{'mentions/recall': 1.0,[...] 'conll_score': 100.0}
"""
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=False ,lowerCAmelCase__=False ,lowerCAmelCase__=True ,lowerCAmelCase__=False ,lowerCAmelCase__="dummy_doc" ):
lowerCamelCase_ = {doc: key_lines}
lowerCamelCase_ = {doc: sys_lines}
lowerCamelCase_ = {}
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ , lowerCamelCase_ = reader.get_doc_mentions(lowerCAmelCase__ ,key_doc_lines[doc] ,lowerCAmelCase__ )
key_singletons_num += singletons_num
if NP_only or min_span:
lowerCamelCase_ = reader.set_annotated_parse_trees(lowerCAmelCase__ ,key_doc_lines[doc] ,lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCamelCase_ , lowerCamelCase_ = reader.get_doc_mentions(lowerCAmelCase__ ,sys_doc_lines[doc] ,lowerCAmelCase__ )
sys_singletons_num += singletons_num
if NP_only or min_span:
lowerCamelCase_ = reader.set_annotated_parse_trees(lowerCAmelCase__ ,key_doc_lines[doc] ,lowerCAmelCase__ ,lowerCAmelCase__ )
if remove_nested:
lowerCamelCase_ , lowerCamelCase_ = reader.remove_nested_coref_mentions(lowerCAmelCase__ ,lowerCAmelCase__ )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
lowerCamelCase_ , lowerCamelCase_ = reader.remove_nested_coref_mentions(lowerCAmelCase__ ,lowerCAmelCase__ )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
lowerCamelCase_ = reader.get_mention_assignments(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCamelCase_ = reader.get_mention_assignments(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCamelCase_ = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
'''Number of removed nested coreferring mentions in the key '''
f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}" )
logger.info(
'''Number of resulting singleton clusters in the key '''
f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}" )
if not keep_singletons:
logger.info(
f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
'''files, respectively''' )
return doc_coref_infos
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = get_coref_infos(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCamelCase_ = {}
lowerCamelCase_ = 0
lowerCamelCase_ = 0
for name, metric in metrics:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = evaluator.evaluate_documents(lowerCAmelCase__ ,lowerCAmelCase__ ,beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": fa} )
logger.info(
name.ljust(10 ) ,f"Recall: {recall * 100:.2f}" ,f" Precision: {precision * 100:.2f}" ,f" F1: {fa * 100:.2f}" ,)
if conll_subparts_num == 3:
lowerCamelCase_ = (conll / 3) * 100
logger.info(f"CoNLL score: {conll:.2f}" )
output_scores.update({'''conll_score''': conll} )
return output_scores
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = False
for line in key_lines:
if not line.startswith('''#''' ):
if len(line.split() ) > 6:
lowerCamelCase_ = line.split()[5]
if not parse_col == "-":
lowerCamelCase_ = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase ( datasets.Metric ):
def UpperCAmelCase__ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' ) ),
'''references''': datasets.Sequence(datasets.Value('''string''' ) ),
} ) , codebase_urls=['''https://github.com/ns-moosavi/coval'''] , reference_urls=[
'''https://github.com/ns-moosavi/coval''',
'''https://www.aclweb.org/anthology/P16-1060''',
'''http://www.conll.cemantix.org/2012/data.html''',
] , )
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=False ):
lowerCamelCase_ = [
('''mentions''', evaluator.mentions),
('''muc''', evaluator.muc),
('''bcub''', evaluator.b_cubed),
('''ceafe''', evaluator.ceafe),
('''lea''', evaluator.lea),
]
if min_span:
lowerCamelCase_ = util.check_gold_parse_annotation(UpperCAmelCase )
if not has_gold_parse:
raise NotImplementedError('''References should have gold parse annotation to use \'min_span\'.''' )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
lowerCamelCase_ = evaluate(
key_lines=UpperCAmelCase , sys_lines=UpperCAmelCase , metrics=UpperCAmelCase , NP_only=UpperCAmelCase , remove_nested=UpperCAmelCase , keep_singletons=UpperCAmelCase , min_span=UpperCAmelCase , )
return score
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
_SCREAMING_SNAKE_CASE : str = (3, 9, -11, 0, 7, 5, 1, -1)
_SCREAMING_SNAKE_CASE : List[Any] = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class A :
'''simple docstring'''
lowerCamelCase : int
lowerCamelCase : Node | None
class A :
'''simple docstring'''
def __init__( self : Union[str, Any] , _UpperCamelCase : Union[str, Any]):
_lowercase: Any = None
for i in sorted(_UpperCamelCase , reverse=_UpperCamelCase):
_lowercase: Optional[int] = Node(_UpperCamelCase , self.head)
def __iter__( self : Tuple):
_lowercase: Any = self.head
while node:
yield node.data
_lowercase: List[Any] = node.next_node
def __len__( self : Tuple):
return sum(1 for _ in self)
def __str__( self : List[str]):
return " -> ".join([str(_UpperCamelCase) for node in self])
def __lowerCAmelCase ( __magic_name__ , __magic_name__ ):
return SortedLinkedList(list(lowerCAmelCase__ ) + list(lowerCAmelCase__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
_SCREAMING_SNAKE_CASE : Dict = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
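# merge_lists above builds a new SortedLinkedList from the concatenation of both inputs,
# relying on the constructor to re-sort them. Because each input is already sorted, a
# linear two-pointer merge over plain lists would also work; a sketch for comparison only:
def merge_sorted(left, right):
    merged, i, j = [], 0, 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    return merged + left[i:] + right[j:]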
"""simple docstring"""
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class __lowerCamelCase ( unittest.TestCase ):
@parameterized.expand([(None,), ('''foo.json''',)] )
def UpperCAmelCase__ ( self , UpperCAmelCase ):
lowerCamelCase_ = GenerationConfig(
do_sample=UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCAmelCase , config_name=UpperCAmelCase )
lowerCamelCase_ = GenerationConfig.from_pretrained(UpperCAmelCase , config_name=UpperCAmelCase )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , UpperCAmelCase )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = AutoConfig.from_pretrained('''gpt2''' )
lowerCamelCase_ = GenerationConfig.from_model_config(UpperCAmelCase )
lowerCamelCase_ = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(UpperCAmelCase , UpperCAmelCase )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig()
lowerCamelCase_ = {
'''max_new_tokens''': 1024,
'''foo''': '''bar''',
}
lowerCamelCase_ = copy.deepcopy(UpperCAmelCase )
lowerCamelCase_ = generation_config.update(**UpperCAmelCase )
# update_kwargs was not modified (no side effects)
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(UpperCAmelCase , {'''foo''': '''bar'''} )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig()
lowerCamelCase_ = '''bar'''
with tempfile.TemporaryDirectory('''test-generation-config''' ) as tmp_dir:
generation_config.save_pretrained(UpperCAmelCase )
lowerCamelCase_ = GenerationConfig.from_pretrained(UpperCAmelCase )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , '''bar''' )
lowerCamelCase_ = GenerationConfig.from_model_config(UpperCAmelCase )
assert not hasattr(UpperCAmelCase , '''foo''' ) # no new kwargs should be initialized if from config
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , UpperCAmelCase )
self.assertEqual(default_config.num_beams , 1 )
lowerCamelCase_ = GenerationConfig(
do_sample=UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , UpperCAmelCase )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCAmelCase )
lowerCamelCase_ = GenerationConfig.from_pretrained(UpperCAmelCase , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , UpperCAmelCase )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class __lowerCamelCase ( unittest.TestCase ):
@classmethod
def UpperCAmelCase__ ( cls ):
lowerCamelCase_ = TOKEN
HfFolder.save_token(UpperCAmelCase )
@classmethod
def UpperCAmelCase__ ( cls ):
try:
delete_repo(token=cls._token , repo_id='''test-generation-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-generation-config-org''' )
except HTTPError:
pass
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig(
do_sample=UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''test-generation-config''' , use_auth_token=self._token )
lowerCamelCase_ = GenerationConfig.from_pretrained(f"{USER}/test-generation-config" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-generation-config''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
UpperCAmelCase , repo_id='''test-generation-config''' , push_to_hub=UpperCAmelCase , use_auth_token=self._token )
lowerCamelCase_ = GenerationConfig.from_pretrained(f"{USER}/test-generation-config" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig(
do_sample=UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''valid_org/test-generation-config-org''' , use_auth_token=self._token )
lowerCamelCase_ = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-generation-config-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
UpperCAmelCase , repo_id='''valid_org/test-generation-config-org''' , push_to_hub=UpperCAmelCase , use_auth_token=self._token )
lowerCamelCase_ = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
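# The tests above round-trip a GenerationConfig through save_pretrained / from_pretrained
# and through the Hub. A minimal local round-trip sketch using the same calls (the
# directory name is hypothetical):
from transformers import GenerationConfig

generation_config = GenerationConfig(do_sample=True, temperature=0.7, length_penalty=1.0)
generation_config.save_pretrained("./tmp-generation-config")
reloaded = GenerationConfig.from_pretrained("./tmp-generation-config")
assert reloaded.temperature == 0.7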
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
_UpperCAmelCase : Union[str, Any] = logging.getLogger(__name__)
class lowerCAmelCase_ :
def __init__( self : Dict ):
lowerCAmelCase__ = False
def __snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Dict ):
if not self.initialized:
lowerCAmelCase__ = RagRetriever(
SCREAMING_SNAKE_CASE_ , question_encoder_tokenizer=SCREAMING_SNAKE_CASE_ , generator_tokenizer=SCREAMING_SNAKE_CASE_ , index=SCREAMING_SNAKE_CASE_ , init_retrieval=SCREAMING_SNAKE_CASE_ , )
lowerCAmelCase__ = True
def __snake_case ( self : Optional[Any] ):
self.retriever.index.init_index()
def __snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[int] ):
lowerCAmelCase__ , lowerCAmelCase__ = self.retriever._main_retrieve(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return doc_ids, retrieved_doc_embeds
class lowerCAmelCase_ ( snake_case__ ):
def __init__( self : Any , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : List[Any]=None ):
if index is not None and index.is_initialized() and len(SCREAMING_SNAKE_CASE_ ) > 0:
raise ValueError(
'''When using Ray for distributed fine-tuning, '''
'''you\'ll need to provide the paths instead, '''
'''as the dataset and the index are loaded '''
'''separately. More info in examples/rag/use_own_knowledge_dataset.py ''' )
super().__init__(
SCREAMING_SNAKE_CASE_ , question_encoder_tokenizer=SCREAMING_SNAKE_CASE_ , generator_tokenizer=SCREAMING_SNAKE_CASE_ , index=SCREAMING_SNAKE_CASE_ , init_retrieval=SCREAMING_SNAKE_CASE_ , )
lowerCAmelCase__ = retrieval_workers
if len(self.retrieval_workers ) > 0:
ray.get(
[
worker.create_rag_retriever.remote(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for worker in self.retrieval_workers
] )
def __snake_case ( self : Tuple ):
logger.info('''initializing retrieval''' )
if len(self.retrieval_workers ) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def __snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : int ):
if len(self.retrieval_workers ) > 0:
# Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )]
            doc_ids , retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
        else:
            doc_ids , retrieved_doc_embeds = self._main_retrieve(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(SCREAMING_SNAKE_CASE_ )
@classmethod
def __snake_case ( cls : Tuple , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Any=None , **SCREAMING_SNAKE_CASE_ : List[Any] ):
return super(SCREAMING_SNAKE_CASE_ , cls ).get_tokenizers(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
@classmethod
def __snake_case ( cls : int , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Any=None , **SCREAMING_SNAKE_CASE_ : List[Any] ):
lowerCAmelCase__ = kwargs.pop('''config''' , SCREAMING_SNAKE_CASE_ ) or RagConfig.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = RagTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ , config=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = rag_tokenizer.question_encoder
lowerCAmelCase__ = rag_tokenizer.generator
if indexed_dataset is not None:
lowerCAmelCase__ = '''custom'''
lowerCAmelCase__ = CustomHFIndex(config.retrieval_vector_size , SCREAMING_SNAKE_CASE_ )
else:
lowerCAmelCase__ = cls._build_index(SCREAMING_SNAKE_CASE_ )
return cls(
SCREAMING_SNAKE_CASE_ , question_encoder_tokenizer=SCREAMING_SNAKE_CASE_ , generator_tokenizer=SCREAMING_SNAKE_CASE_ , retrieval_workers=SCREAMING_SNAKE_CASE_ , index=SCREAMING_SNAKE_CASE_ , )
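# A minimal usage sketch, assuming the classes above play the roles of transformers'
# RayRetriever / RagRayDistributedRetriever (model id and worker count are illustrative,
# not taken from this file):
#
#   ray.init()
#   workers = [ray.remote(RayRetriever).remote() for _ in range(2)]
#   retriever = RagRayDistributedRetriever.from_pretrained("facebook/rag-token-nq", workers)
#   retriever.init_retrieval()  # initializes the index on every worker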
| 668 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __lowerCamelCase :
a__: List[str]
a__: Optional[str] = None
# Automatically constructed
a__: ClassVar[str] = "dict"
a__: ClassVar[Any] = None
a__: str = field(default='Translation' , init=lowerCAmelCase , repr=lowerCAmelCase )
def __call__( self ):
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def UpperCAmelCase__ ( self ):
from .features import Value
return {k: Value('''string''' ) for k in sorted(self.languages )}
@dataclass
class __lowerCamelCase :
a__: Optional[List] = None
a__: Optional[int] = None
a__: Optional[str] = None
# Automatically constructed
a__: ClassVar[str] = "dict"
a__: ClassVar[Any] = None
a__: str = field(default='TranslationVariableLanguages' , init=lowerCAmelCase , repr=lowerCAmelCase )
def UpperCAmelCase__ ( self ):
        self.languages = sorted(set(self.languages ) ) if self.languages else None
        self.num_languages = len(self.languages ) if self.languages else None
def __call__( self ):
return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} )
    def UpperCAmelCase__ ( self , translation_dict ):
        lang_set = set(self.languages )
        if self.languages and set(translation_dict ) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(translation_dict ) - lang_set ) )}) are not in valid set ({', '.join(lang_set )})." )
        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text , str ):
                translation_tuples.append((lang, text) )
            else:
                translation_tuples.extend([(lang, el) for el in text] )
        # Ensure translations are in ascending order by language code.
        languages , translations = zip(*sorted(translation_tuples ) )
return {"language": languages, "translation": translations}
def UpperCAmelCase__ ( self ):
from .features import Sequence, Value
return {
"language": Sequence(Value('''string''' ) ),
"translation": Sequence(Value('''string''' ) ),
}
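# For illustration, the flattening implemented above turns
#     {"en": "the cat", "fr": ["le chat", "la chatte"]}
# into
#     {"language": ["en", "fr", "fr"], "translation": ["the cat", "la chatte", "le chat"]}
# (pairs are sorted by language code first, then by text).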
| 29 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_import_structure = {'tokenization_herbert': ['HerbertTokenizer']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_herbert_fast'] = ['HerbertTokenizerFast']
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 640 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
"""configuration_gpt_neo""": ["""GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoConfig""", """GPTNeoOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_gpt_neo"""] = [
"""GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoForCausalLM""",
"""GPTNeoForQuestionAnswering""",
"""GPTNeoForSequenceClassification""",
"""GPTNeoForTokenClassification""",
"""GPTNeoModel""",
"""GPTNeoPreTrainedModel""",
"""load_tf_weights_in_gpt_neo""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_gpt_neo"""] = [
"""FlaxGPTNeoForCausalLM""",
"""FlaxGPTNeoModel""",
"""FlaxGPTNeoPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 29 | 0 |
'''simple docstring'''
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
_A = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small" )
_A = AutoTokenizer.from_pretrained("google/mt5-small" )
_A = tokenizer("Hello there" , return_tensors="np" ).input_ids
_A = tokenizer("Hi I am" , return_tensors="np" ).input_ids
_A = shift_tokens_right(__UpperCAmelCase , model.config.pad_token_id , model.config.decoder_start_token_id )
_A = model(__UpperCAmelCase , decoder_input_ids=__UpperCAmelCase ).logits
_A = optax.softmax_cross_entropy(__UpperCAmelCase , onehot(__UpperCAmelCase , logits.shape[-1] ) ).mean()
_A = -(labels.shape[-1] * loss.item())
_A = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 330 |
"""simple docstring"""
import math
def prime_sieve(n):
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    # mark every multiple of each odd i (starting at 2 * i) as composite
    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i
    primes = [2]
    # 2 is the only even prime; collect the remaining primes from the odd indices
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)
    return primes
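# Quick sanity check for the sieve above: prime_sieve(20) -> [2, 3, 5, 7, 11, 13, 17, 19]
# (the final loop only inspects 2 and odd indices, so even composites never need marking).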
def solution(limit=999_966_663_333):
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)
    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]
    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]
        lower_bound = last_prime**2
        upper_bound = next_prime**2
        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime
        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime
        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime
        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime
        # Setup for next pair
        last_prime = next_prime
        prime_index += 1
    return matches_sum
if __name__ == "__main__":
print(solution())
| 29 | 0 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester :
"""simple docstring"""
def __init__( self , a__ , a__=13 , a__=10 , a__=3 , a__=2 , a__=2 , a__=2 , a__=True , a__=True , a__=32 , a__=5 , a__=4 , a__=37 , a__="gelu" , a__=0.1 , a__=0.1 , a__=10 , a__=0.02 , a__=0.9 , a__=None , ) -> Union[str, Any]:
A = parent
A = batch_size
A = image_size
A = num_channels
A = patch_size
A = tubelet_size
A = num_frames
A = is_training
A = use_labels
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = type_sequence_label_size
A = initializer_range
A = mask_ratio
A = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
A = (image_size // patch_size) ** 2
A = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
A = int(mask_ratio * self.seq_length )
def _UpperCAmelCase ( self ) -> Tuple:
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ) -> Any:
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a__ , initializer_range=self.initializer_range , )
def _UpperCAmelCase ( self , a__ , a__ , a__ ) -> List[str]:
A = VideoMAEModel(config=a__ )
model.to(a__ )
model.eval()
A = model(a__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self , a__ , a__ , a__ ) -> List[str]:
A = VideoMAEForPreTraining(a__ )
model.to(a__ )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
A = torch.ones((self.num_masks,) )
A = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
A = mask.expand(self.batch_size , -1 ).bool()
A = model(a__ , a__ )
# model only returns predictions for masked patches
A = mask.sum().item()
A = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
def _UpperCAmelCase ( self ) -> Dict:
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _UpperCamelCase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
lowerCAmelCase = (
{'feature-extraction': VideoMAEModel, 'video-classification': VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
def _UpperCAmelCase ( self ) -> Any:
A = VideoMAEModelTester(self )
A = ConfigTester(self , config_class=a__ , has_text_modality=a__ , hidden_size=37 )
def _UpperCAmelCase ( self , a__ , a__ , a__=False ) -> str:
A = copy.deepcopy(a__ )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
A = torch.ones((self.model_tester.num_masks,) )
A = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
A = mask.expand(self.model_tester.batch_size , -1 ).bool()
A = bool_masked_pos.to(a__ )
if return_labels:
if model_class in [
*get_values(a__ ),
]:
A = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a__ )
return inputs_dict
def _UpperCAmelCase ( self ) -> Tuple:
self.config_tester.run_common_tests()
@unittest.skip(reason="""VideoMAE does not use inputs_embeds""" )
def _UpperCAmelCase ( self ) -> List[str]:
pass
def _UpperCAmelCase ( self ) -> List[str]:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(a__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a__ , nn.Linear ) )
def _UpperCAmelCase ( self ) -> Optional[Any]:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(a__ )
A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A = [*signature.parameters.keys()]
A = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , a__ )
def _UpperCAmelCase ( self ) -> int:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def _UpperCAmelCase ( self ) -> List[str]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*a__ )
@slow
def _UpperCAmelCase ( self ) -> str:
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A = VideoMAEModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
if not self.has_attentions:
pass
else:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
A = True
for model_class in self.all_model_classes:
A = self.model_tester.seq_length - self.model_tester.num_masks
A = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
A = True
A = False
A = True
A = model_class(a__ )
model.to(a__ )
model.eval()
with torch.no_grad():
A = model(**self._prepare_for_class(a__ , a__ ) )
A = outputs.attentions
self.assertEqual(len(a__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
A = True
A = model_class(a__ )
model.to(a__ )
model.eval()
with torch.no_grad():
A = model(**self._prepare_for_class(a__ , a__ ) )
A = outputs.attentions
self.assertEqual(len(a__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
A = len(a__ )
# Check attention is always last and order is fine
A = True
A = True
A = model_class(a__ )
model.to(a__ )
model.eval()
with torch.no_grad():
A = model(**self._prepare_for_class(a__ , a__ ) )
self.assertEqual(out_len + 1 , len(a__ ) )
A = outputs.attentions
self.assertEqual(len(a__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def _UpperCAmelCase ( self ) -> Optional[int]:
def check_hidden_states_output(a__ , a__ , a__ ):
A = model_class(a__ )
model.to(a__ )
model.eval()
with torch.no_grad():
A = model(**self._prepare_for_class(a__ , a__ ) )
A = outputs.hidden_states
A = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(a__ ) , a__ )
A = self.model_tester.seq_length - self.model_tester.num_masks
A = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = True
check_hidden_states_output(a__ , a__ , a__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A = True
check_hidden_states_output(a__ , a__ , a__ )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _UpperCAmelCase ( self ) -> str:
pass
def _lowerCAmelCase ( ) -> Any:
"""simple docstring"""
A = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" )
A = np.load(lowerCAmelCase__ )
return list(lowerCAmelCase__ )
@require_torch
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _UpperCAmelCase ( self ) -> Any:
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def _UpperCAmelCase ( self ) -> Tuple:
A = VideoMAEForVideoClassification.from_pretrained("""MCG-NJU/videomae-base-finetuned-kinetics""" ).to(
a__ )
A = self.default_image_processor
A = prepare_video()
A = image_processor(a__ , return_tensors="""pt""" ).to(a__ )
# forward pass
with torch.no_grad():
A = model(**a__ )
# verify the logits
A = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , a__ )
A = torch.tensor([0.36_69, -0.06_88, -0.24_21] ).to(a__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a__ , atol=1e-4 ) )
@slow
def _UpperCAmelCase ( self ) -> Union[str, Any]:
A = VideoMAEForPreTraining.from_pretrained("""MCG-NJU/videomae-base-short""" ).to(a__ )
A = self.default_image_processor
A = prepare_video()
A = image_processor(a__ , return_tensors="""pt""" ).to(a__ )
# add boolean mask, indicating which patches to mask
A = hf_hub_download(repo_id="""hf-internal-testing/bool-masked-pos""" , filename="""bool_masked_pos.pt""" )
A = torch.load(a__ )
# forward pass
with torch.no_grad():
A = model(**a__ )
# verify the logits
A = torch.Size([1, 1408, 1536] )
A = torch.tensor(
[[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]] , device=a__ )
self.assertEqual(outputs.logits.shape , a__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , a__ , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
A = torch.tensor([0.51_42] , device=a__ )
self.assertTrue(torch.allclose(outputs.loss , a__ , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
A = VideoMAEForPreTraining.from_pretrained("""MCG-NJU/videomae-base-short""" , norm_pix_loss=a__ ).to(
a__ )
with torch.no_grad():
A = model(**a__ )
A = torch.tensor(torch.tensor([0.64_69] ) , device=a__ )
self.assertTrue(torch.allclose(outputs.loss , a__ , atol=1e-4 ) )
| 641 |
"""simple docstring"""
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
A_ = namedtuple(
"""_TestCommandArgs""",
[
"""dataset""",
"""name""",
"""cache_dir""",
"""data_dir""",
"""all_configs""",
"""save_infos""",
"""ignore_verifications""",
"""force_redownload""",
"""clear_cache""",
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_apercent_close ( source , target ):
return (abs(source - target ) / target) < 0.01
@pytest.mark.integration
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = _TestCommandArgs(dataset=lowerCAmelCase__ ,all_configs=lowerCAmelCase__ ,save_infos=lowerCAmelCase__ )
lowerCamelCase_ = TestCommand(*lowerCAmelCase__ )
test_command.run()
lowerCamelCase_ = os.path.join(lowerCAmelCase__ ,'''README.md''' )
assert os.path.exists(lowerCAmelCase__ )
lowerCamelCase_ = DatasetInfosDict.from_directory(lowerCAmelCase__ )
lowerCamelCase_ = DatasetInfosDict(
{
'''default''': DatasetInfo(
features=Features(
{
'''tokens''': Sequence(Value('''string''' ) ),
'''ner_tags''': Sequence(
ClassLabel(names=['''O''', '''B-PER''', '''I-PER''', '''B-ORG''', '''I-ORG''', '''B-LOC''', '''I-LOC'''] ) ),
'''langs''': Sequence(Value('''string''' ) ),
'''spans''': Sequence(Value('''string''' ) ),
} ) ,splits=[
{
'''name''': '''train''',
'''num_bytes''': 2_351_563,
'''num_examples''': 10_000,
},
{
'''name''': '''validation''',
'''num_bytes''': 238_418,
'''num_examples''': 1_000,
},
] ,download_size=3_940_680 ,dataset_size=2_589_981 ,)
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
lowerCamelCase_ , lowerCamelCase_ = getattr(dataset_infos['''default'''] ,lowerCAmelCase__ ), getattr(expected_dataset_infos['''default'''] ,lowerCAmelCase__ )
if key == "num_bytes":
assert is_apercent_close(lowerCAmelCase__ ,lowerCAmelCase__ )
elif key == "splits":
assert list(lowerCAmelCase__ ) == list(lowerCAmelCase__ )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes ,expected[split].num_bytes )
else:
            assert result == expected
| 29 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '''▁'''
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/mbart-large-en-ro''': 1024,
'''facebook/mbart-large-cc25''': 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''']
class _snake_case ( PreTrainedTokenizer ):
"""simple docstring"""
a = VOCAB_FILES_NAMES
a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a = PRETRAINED_VOCAB_FILES_MAP
a = ['input_ids', 'attention_mask']
a = []
a = []
def __init__( self : List[Any] , _A : List[str] , _A : str="<s>" , _A : Dict="</s>" , _A : List[Any]="</s>" , _A : Dict="<s>" , _A : int="<unk>" , _A : str="<pad>" , _A : List[Any]="<mask>" , _A : Any=None , _A : List[Any]=None , _A : int=None , _A : Dict = None , _A : List[str]=None , **_A : Optional[Any] , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Any = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else mask_token
_SCREAMING_SNAKE_CASE : Any = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_A , eos_token=_A , unk_token=_A , sep_token=_A , cls_token=_A , pad_token=_A , mask_token=_A , tokenizer_file=_A , src_lang=_A , tgt_lang=_A , additional_special_tokens=_A , sp_model_kwargs=self.sp_model_kwargs , **_A , )
_SCREAMING_SNAKE_CASE : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(_A))
_SCREAMING_SNAKE_CASE : Optional[Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
_SCREAMING_SNAKE_CASE : Optional[Any] = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_SCREAMING_SNAKE_CASE : int = 1
_SCREAMING_SNAKE_CASE : Any = len(self.sp_model)
_SCREAMING_SNAKE_CASE : List[str] = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(_A)
}
_SCREAMING_SNAKE_CASE : Tuple = {v: k for k, v in self.lang_code_to_id.items()}
_SCREAMING_SNAKE_CASE : Any = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
_SCREAMING_SNAKE_CASE : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
_SCREAMING_SNAKE_CASE : str = list(self.lang_code_to_id.keys())
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens])
_SCREAMING_SNAKE_CASE : List[str] = src_lang if src_lang is not None else """en_XX"""
_SCREAMING_SNAKE_CASE : Optional[int] = self.lang_code_to_id[self._src_lang]
_SCREAMING_SNAKE_CASE : List[Any] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang)
def __getstate__( self : Tuple):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[int] = self.__dict__.copy()
_SCREAMING_SNAKE_CASE : Optional[int] = None
_SCREAMING_SNAKE_CASE : Optional[int] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : int , _A : Optional[int]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[Any] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs"""):
_SCREAMING_SNAKE_CASE : Tuple = {}
_SCREAMING_SNAKE_CASE : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
@property
def _lowerCAmelCase ( self : Optional[Any]):
"""simple docstring"""
return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def _lowerCAmelCase ( self : int):
"""simple docstring"""
return self._src_lang
@src_lang.setter
def _lowerCAmelCase ( self : str , _A : List[Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : str = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
def _lowerCAmelCase ( self : int , _A : str , _A : Union[str, Any] = None , _A : Optional[Any] = False):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A)
_SCREAMING_SNAKE_CASE : Optional[Any] = [1] * len(self.prefix_tokens)
_SCREAMING_SNAKE_CASE : Dict = [1] * len(self.suffix_tokens)
if token_ids_a is None:
return prefix_ones + ([0] * len(_A)) + suffix_ones
return prefix_ones + ([0] * len(_A)) + ([0] * len(_A)) + suffix_ones
def _lowerCAmelCase ( self : Union[str, Any] , _A : Any , _A : Optional[int] = None):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _lowerCAmelCase ( self : Optional[Any] , _A : List[Any] , _A : str = None):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[str] = [self.sep_token_id]
_SCREAMING_SNAKE_CASE : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def _lowerCAmelCase ( self : Any , _A : List[Any] , _A : Any , _A : Optional[Any] , _A : Union[str, Any] , **_A : List[str]):
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""")
_SCREAMING_SNAKE_CASE : Optional[Any] = src_lang
_SCREAMING_SNAKE_CASE : str = self(_A , add_special_tokens=_A , return_tensors=_A , **_A)
_SCREAMING_SNAKE_CASE : Dict = self.convert_tokens_to_ids(_A)
_SCREAMING_SNAKE_CASE : List[str] = tgt_lang_id
return inputs
def _lowerCAmelCase ( self : Tuple):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Any = {self.convert_ids_to_tokens(_A): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def _lowerCAmelCase ( self : Union[str, Any] , _A : Optional[int]):
"""simple docstring"""
return self.sp_model.encode(_A , out_type=_A)
def _lowerCAmelCase ( self : Optional[Any] , _A : str):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.sp_model.PieceToId(_A)
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _lowerCAmelCase ( self : Optional[int] , _A : int):
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset)
def _lowerCAmelCase ( self : Union[str, Any] , _A : str):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[str] = """""".join(_A).replace(_A , """ """).strip()
return out_string
def _lowerCAmelCase ( self : Union[str, Any] , _A : Any , _A : int = None):
"""simple docstring"""
if not os.path.isdir(_A):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
return
_SCREAMING_SNAKE_CASE : Any = os.path.join(
_A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])
if os.path.abspath(self.vocab_file) != os.path.abspath(_A) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , _A)
elif not os.path.isfile(self.vocab_file):
with open(_A , """wb""") as fi:
_SCREAMING_SNAKE_CASE : str = self.sp_model.serialized_model_proto()
fi.write(_A)
return (out_vocab_file,)
def _lowerCAmelCase ( self : Optional[Any] , _A : str , _A : Union[str, Any] = "en_XX" , _A : Tuple = None , _A : Tuple = "ro_RO" , **_A : Union[str, Any] , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Tuple = src_lang
_SCREAMING_SNAKE_CASE : Union[str, Any] = tgt_lang
return super().prepare_seqaseq_batch(_A , _A , **_A)
def _lowerCAmelCase ( self : Optional[Any]):
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang)
def _lowerCAmelCase ( self : int):
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang)
def _lowerCAmelCase ( self : str , _A : Optional[int]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Tuple = self.lang_code_to_id[src_lang]
_SCREAMING_SNAKE_CASE : List[Any] = []
_SCREAMING_SNAKE_CASE : Optional[int] = [self.eos_token_id, self.cur_lang_code]
def _lowerCAmelCase ( self : List[str] , _A : Tuple):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : str = self.lang_code_to_id[lang]
_SCREAMING_SNAKE_CASE : Any = []
_SCREAMING_SNAKE_CASE : Union[str, Any] = [self.eos_token_id, self.cur_lang_code]
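# A minimal usage sketch, assuming the class above mirrors transformers' MBartTokenizer
# (model id and sentences are illustrative, not taken from this file):
#
#   tok = MBartTokenizer.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
#   batch = tok.prepare_seq2seq_batch(
#       ["UN Chief says there is no military solution in Syria"],
#       tgt_texts=["Şeful ONU declară că nu există o soluţie militară în Siria"],
#       return_tensors="pt",
#   )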
| 338 |
"""simple docstring"""
from jiwer import compute_measures
import datasets
A_ = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
A_ = """\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
"""
A_ = """
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> wer = datasets.load_metric(\"wer\")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase ( datasets.Metric ):
def UpperCAmelCase__ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
] , )
    def UpperCAmelCase__ ( self , predictions=None , references=None , concatenate_texts=False ):
        if concatenate_texts:
            return compute_measures(references , predictions )["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions , references ):
                measures = compute_measures(reference , prediction )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
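# Worked example of the formula in the description string above, for the first docstring pair
# (reference "this is the reference" vs. prediction "this is the prediction"):
#   S=1, D=0, I=0, N=4  ->  WER = (1 + 0 + 0) / 4 = 0.25
# pooled over both docstring pairs, the example reports an overall WER of 0.5.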
| 29 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester :
def __init__( self : Union[str, Any] , _lowercase : Any , _lowercase : List[str]=1_3 , _lowercase : Tuple=7 , _lowercase : Any=True , _lowercase : int=True , _lowercase : List[str]=True , _lowercase : List[str]=True , _lowercase : Tuple=9_9 , _lowercase : Tuple=3_2 , _lowercase : Any=2 , _lowercase : Optional[int]=4 , _lowercase : str=3_7 , _lowercase : str="gelu" , _lowercase : int=0.1 , _lowercase : List[Any]=0.1 , _lowercase : List[str]=5_1_2 , _lowercase : int=1_6 , _lowercase : Optional[Any]=2 , _lowercase : int=0.02 , _lowercase : Optional[Any]=3 , _lowercase : Union[str, Any]=4 , _lowercase : List[str]=None , _lowercase : int=0 , ):
lowerCAmelCase__ : int = parent
lowerCAmelCase__ : List[Any] = batch_size
lowerCAmelCase__ : Union[str, Any] = seq_length
lowerCAmelCase__ : Union[str, Any] = is_training
lowerCAmelCase__ : List[str] = use_input_mask
lowerCAmelCase__ : int = use_token_type_ids
lowerCAmelCase__ : Any = use_labels
lowerCAmelCase__ : Union[str, Any] = vocab_size
lowerCAmelCase__ : Optional[int] = hidden_size
lowerCAmelCase__ : Tuple = num_hidden_layers
lowerCAmelCase__ : Optional[Any] = num_attention_heads
lowerCAmelCase__ : Optional[Any] = intermediate_size
lowerCAmelCase__ : Optional[int] = hidden_act
lowerCAmelCase__ : Optional[Any] = hidden_dropout_prob
lowerCAmelCase__ : str = attention_probs_dropout_prob
lowerCAmelCase__ : List[str] = max_position_embeddings
lowerCAmelCase__ : Any = type_vocab_size
lowerCAmelCase__ : int = type_sequence_label_size
lowerCAmelCase__ : str = initializer_range
lowerCAmelCase__ : Optional[int] = num_labels
lowerCAmelCase__ : Tuple = num_choices
lowerCAmelCase__ : List[str] = scope
lowerCAmelCase__ : Dict = projection_dim
def _lowerCAmelCase ( self : Any ):
lowerCAmelCase__ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ : Dict = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
lowerCAmelCase__ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ : Optional[int] = None
if self.use_token_type_ids:
lowerCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase__ : Any = None
lowerCAmelCase__ : int = None
lowerCAmelCase__ : Any = None
if self.use_labels:
lowerCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase__ : int = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase__ : Any = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowercase , initializer_range=self.initializer_range , )
lowerCAmelCase__ : Any = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCAmelCase ( self : List[Any] , _lowercase : List[Any] , _lowercase : Tuple , _lowercase : Union[str, Any] , _lowercase : Optional[Any] , _lowercase : Union[str, Any] , _lowercase : str , _lowercase : str ):
lowerCAmelCase__ : Dict = TFDPRContextEncoder(config=_lowercase )
lowerCAmelCase__ : Optional[int] = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase )
lowerCAmelCase__ : str = model(_lowercase , token_type_ids=_lowercase )
lowerCAmelCase__ : Any = model(_lowercase )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def _lowerCAmelCase ( self : int , _lowercase : Any , _lowercase : List[Any] , _lowercase : Optional[Any] , _lowercase : Union[str, Any] , _lowercase : Optional[int] , _lowercase : Dict , _lowercase : str ):
lowerCAmelCase__ : Any = TFDPRQuestionEncoder(config=_lowercase )
lowerCAmelCase__ : Tuple = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase )
lowerCAmelCase__ : List[str] = model(_lowercase , token_type_ids=_lowercase )
lowerCAmelCase__ : Dict = model(_lowercase )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def _lowerCAmelCase ( self : List[Any] , _lowercase : Optional[int] , _lowercase : Optional[int] , _lowercase : Union[str, Any] , _lowercase : List[Any] , _lowercase : int , _lowercase : Tuple , _lowercase : Tuple ):
lowerCAmelCase__ : int = TFDPRReader(config=_lowercase )
lowerCAmelCase__ : Dict = model(_lowercase , attention_mask=_lowercase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
def _lowerCAmelCase ( self : Union[str, Any] ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
return config, inputs_dict
@require_tf
class lowercase_ ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
__magic_name__ : Any = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
__magic_name__ : Optional[int] = {'feature-extraction': TFDPRQuestionEncoder} if is_tf_available() else {}
__magic_name__ : str = False
__magic_name__ : Any = False
__magic_name__ : int = False
__magic_name__ : Dict = False
__magic_name__ : Any = False
def _lowerCAmelCase ( self : List[str] ):
lowerCAmelCase__ : str = TFDPRModelTester(self )
lowerCAmelCase__ : Optional[int] = ConfigTester(self , config_class=_lowercase , hidden_size=3_7 )
def _lowerCAmelCase ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self : int ):
lowerCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*_lowercase )
def _lowerCAmelCase ( self : Tuple ):
lowerCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*_lowercase )
def _lowerCAmelCase ( self : Any ):
lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*_lowercase )
@slow
def _lowerCAmelCase ( self : str ):
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ : str = TFDPRContextEncoder.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ : Tuple = TFDPRContextEncoder.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ : str = TFDPRQuestionEncoder.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ : Tuple = TFDPRReader.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
@require_tf
class lowercase_ ( unittest.TestCase ):
@slow
def _lowerCAmelCase ( self : Tuple ):
lowerCAmelCase__ : Any = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base" )
lowerCAmelCase__ : Optional[int] = tf.constant(
[[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_0_3, 2_0_2_6, 3_8_9_9, 1_0_1_4_0, 1_0_2_9, 1_0_2]] ) # [CLS] hello, is my dog cute? [SEP]
lowerCAmelCase__ : List[Any] = model(_lowercase )[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
lowerCAmelCase__ : Union[str, Any] = tf.constant(
[
[
0.0323_6253,
0.1275_3335,
0.1681_8509,
0.0027_9786,
0.389_6933,
0.2426_4945,
0.217_8971,
-0.0233_5227,
-0.0848_1959,
-0.1432_4117,
]
] )
self.assertTrue(numpy.allclose(output[:, :1_0].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 308 |
"""simple docstring"""
def lowercase ( lowerCAmelCase__ ):
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 29 | 0 |
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007
def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))
def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)
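# e.g. euclidean_distance([0, 0], [3, 4]) == euclidean_distance_no_np([0, 0], [3, 4]) == 5.0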
if __name__ == "__main__":
    def benchmark() -> None:
from timeit import timeit
print('Without Numpy' )
print(
timeit(
'euclidean_distance_no_np([1, 2, 3], [4, 5, 6])' , number=1_0000 , globals=globals() , ) )
print('With Numpy' )
print(
timeit(
'euclidean_distance([1, 2, 3], [4, 5, 6])' , number=1_0000 , globals=globals() , ) )
benchmark()
| 89 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class __lowerCamelCase ( unittest.TestCase ):
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = jnp.ones((batch_size, length) ) / length
return scores
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = None
lowerCamelCase_ = 20
lowerCamelCase_ = self._get_uniform_logits(batch_size=2 , length=UpperCAmelCase )
# tweak scores to not be uniform anymore
lowerCamelCase_ = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
lowerCamelCase_ = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
lowerCamelCase_ = jax.nn.softmax(UpperCAmelCase , axis=-1 )
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=1.3 )
lowerCamelCase_ = jax.nn.softmax(temp_dist_warper_sharper(UpperCAmelCase , scores.copy() , cur_len=UpperCAmelCase ) , axis=-1 )
lowerCamelCase_ = jax.nn.softmax(temp_dist_warper_smoother(UpperCAmelCase , scores.copy() , cur_len=UpperCAmelCase ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = None
lowerCamelCase_ = 10
lowerCamelCase_ = 2
# create ramp distribution
lowerCamelCase_ = np.broadcast_to(np.arange(UpperCAmelCase )[None, :] , (batch_size, vocab_size) ).copy()
lowerCamelCase_ = ramp_logits[1:, : vocab_size // 2] + vocab_size
lowerCamelCase_ = FlaxTopKLogitsWarper(3 )
lowerCamelCase_ = top_k_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
lowerCamelCase_ = 5
lowerCamelCase_ = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
lowerCamelCase_ = np.broadcast_to(np.arange(UpperCAmelCase )[None, :] , (batch_size, length) ).copy()
lowerCamelCase_ = top_k_warp_safety_check(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = None
lowerCamelCase_ = 10
lowerCamelCase_ = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
lowerCamelCase_ = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.1_5, 0.3, 0.3, 0.2_5]] ) )
lowerCamelCase_ = FlaxTopPLogitsWarper(0.8 )
lowerCamelCase_ = np.exp(top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
lowerCamelCase_ = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.2_5]] )
self.assertTrue(np.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
# check edge cases with negative and extreme logits
lowerCamelCase_ = np.broadcast_to(np.arange(UpperCAmelCase )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
lowerCamelCase_ = ramp_logits[1] * 1_0_0.0
# make sure at least 2 tokens are kept
lowerCamelCase_ = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
lowerCamelCase_ = top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 20
lowerCamelCase_ = 4
lowerCamelCase_ = 0
lowerCamelCase_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCAmelCase )
# check that min length is applied at length 5
lowerCamelCase_ = ids_tensor((batch_size, 20) , vocab_size=20 )
lowerCamelCase_ = 5
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = min_dist_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] )
# check that min length is not applied anymore at length 15
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = 15
lowerCamelCase_ = min_dist_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertFalse(jnp.isinf(UpperCAmelCase ).any() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 20
lowerCamelCase_ = 4
lowerCamelCase_ = 0
lowerCamelCase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase )
# check that all scores are -inf except the bos_token_id score
lowerCamelCase_ = ids_tensor((batch_size, 1) , vocab_size=20 )
lowerCamelCase_ = 1
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
lowerCamelCase_ = 3
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertFalse(jnp.isinf(UpperCAmelCase ).any() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 20
lowerCamelCase_ = 4
lowerCamelCase_ = 0
lowerCamelCase_ = 5
lowerCamelCase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase , eos_token_id=UpperCAmelCase )
# check that all scores are -inf except the eos_token_id when max_length is reached
lowerCamelCase_ = ids_tensor((batch_size, 4) , vocab_size=20 )
lowerCamelCase_ = 4
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
lowerCamelCase_ = 3
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertFalse(jnp.isinf(UpperCAmelCase ).any() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 4
lowerCamelCase_ = 10
lowerCamelCase_ = 15
lowerCamelCase_ = 2
lowerCamelCase_ = 1
lowerCamelCase_ = 15
# dummy input_ids and scores
lowerCamelCase_ = ids_tensor((batch_size, sequence_length) , UpperCAmelCase )
lowerCamelCase_ = input_ids.copy()
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = scores.copy()
# instantiate all dist processors
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase_ = FlaxTopKLogitsWarper(3 )
lowerCamelCase_ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCamelCase_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCAmelCase )
lowerCamelCase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase )
lowerCamelCase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase , eos_token_id=UpperCAmelCase )
lowerCamelCase_ = 10
# no processor list
lowerCamelCase_ = temp_dist_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = top_k_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = min_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = bos_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = eos_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# with processor list
lowerCamelCase_ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCamelCase_ = processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 4
lowerCamelCase_ = 10
lowerCamelCase_ = 15
lowerCamelCase_ = 2
lowerCamelCase_ = 1
lowerCamelCase_ = 15
# dummy input_ids and scores
lowerCamelCase_ = ids_tensor((batch_size, sequence_length) , UpperCAmelCase )
lowerCamelCase_ = input_ids.copy()
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = scores.copy()
# instantiate all dist processors
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase_ = FlaxTopKLogitsWarper(3 )
lowerCamelCase_ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCamelCase_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCAmelCase )
lowerCamelCase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase )
lowerCamelCase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase , eos_token_id=UpperCAmelCase )
lowerCamelCase_ = 10
# no processor list
def run_no_processor_list(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = temp_dist_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = top_k_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = min_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = bos_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = eos_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
return scores
# with processor list
def run_processor_list(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCamelCase_ = processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
return scores
lowerCamelCase_ = jax.jit(UpperCAmelCase )
lowerCamelCase_ = jax.jit(UpperCAmelCase )
lowerCamelCase_ = jitted_run_no_processor_list(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = jitted_run_processor_list(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
| 29 | 0 |
"""simple docstring"""
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
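# Descriptor that behaves like functools.cached_property: the wrapped getter runs once per
# instance and its result is cached under a "__cached_<name>" attribute for later lookups.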
class a_ ( __UpperCamelCase ):
def __get__( self : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : List[str]=None ):
# See docs.python.org/3/howto/descriptor.html#properties
if obj is None:
return self
if self.fget is None:
raise AttributeError("""unreadable attribute""" )
lowerCAmelCase__ = """__cached_""" + self.fget.__name__
lowerCAmelCase__ = getattr(snake_case__ , snake_case__ , snake_case__ )
if cached is None:
lowerCAmelCase__ = self.fget(snake_case__ )
setattr(snake_case__ , snake_case__ , snake_case__ )
return cached
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(f"""invalid truth value {val!r}""" )
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
if is_torch_fx_proxy(lowerCAmelCase__ ):
return True
if is_torch_available():
import torch
if isinstance(lowerCAmelCase__ , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(lowerCAmelCase__ , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(lowerCAmelCase__ , (jnp.ndarray, Tracer) ):
return True
return isinstance(lowerCAmelCase__ , np.ndarray )
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
return isinstance(lowerCAmelCase__ , np.ndarray )
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
return _is_numpy(lowerCAmelCase__ )
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
import torch
return isinstance(lowerCAmelCase__ , torch.Tensor )
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
return False if not is_torch_available() else _is_torch(lowerCAmelCase__ )
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
import torch
return isinstance(lowerCAmelCase__ , torch.device )
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
return False if not is_torch_available() else _is_torch_device(lowerCAmelCase__ )
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
import torch
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
if hasattr(lowerCAmelCase__ , lowerCAmelCase__ ):
lowerCAmelCase__ = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
else:
return False
return isinstance(lowerCAmelCase__ , torch.dtype )
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
return False if not is_torch_available() else _is_torch_dtype(lowerCAmelCase__ )
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
import tensorflow as tf
return isinstance(lowerCAmelCase__ , tf.Tensor )
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
return False if not is_tf_available() else _is_tensorflow(lowerCAmelCase__ )
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(lowerCAmelCase__ , """is_symbolic_tensor""" ):
return tf.is_symbolic_tensor(lowerCAmelCase__ )
return type(lowerCAmelCase__ ) == tf.Tensor
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
return False if not is_tf_available() else _is_tf_symbolic_tensor(lowerCAmelCase__ )
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
import jax.numpy as jnp # noqa: F811
return isinstance(lowerCAmelCase__ , jnp.ndarray )
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
return False if not is_flax_available() else _is_jax(lowerCAmelCase__ )
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
if isinstance(lowerCAmelCase__ , (dict, UserDict) ):
return {k: to_py_obj(lowerCAmelCase__ ) for k, v in obj.items()}
elif isinstance(lowerCAmelCase__ , (list, tuple) ):
return [to_py_obj(lowerCAmelCase__ ) for o in obj]
elif is_tf_tensor(lowerCAmelCase__ ):
return obj.numpy().tolist()
elif is_torch_tensor(lowerCAmelCase__ ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(lowerCAmelCase__ ):
return np.asarray(lowerCAmelCase__ ).tolist()
elif isinstance(lowerCAmelCase__ , (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
if isinstance(lowerCAmelCase__ , (dict, UserDict) ):
return {k: to_numpy(lowerCAmelCase__ ) for k, v in obj.items()}
elif isinstance(lowerCAmelCase__ , (list, tuple) ):
return np.array(lowerCAmelCase__ )
elif is_tf_tensor(lowerCAmelCase__ ):
return obj.numpy()
elif is_torch_tensor(lowerCAmelCase__ ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(lowerCAmelCase__ ):
return np.asarray(lowerCAmelCase__ )
else:
return obj
class a_ ( __UpperCamelCase ):
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
lowerCAmelCase__ = fields(self )
# Safety and consistency checks
if not len(snake_case__ ):
raise ValueError(F"""{self.__class__.__name__} has no fields.""" )
if not all(field.default is None for field in class_fields[1:] ):
raise ValueError(F"""{self.__class__.__name__} should not have more than one required field.""" )
lowerCAmelCase__ = getattr(self , class_fields[0].name )
lowerCAmelCase__ = all(getattr(self , field.name ) is None for field in class_fields[1:] )
if other_fields_are_none and not is_tensor(snake_case__ ):
if isinstance(snake_case__ , snake_case__ ):
lowerCAmelCase__ = first_field.items()
lowerCAmelCase__ = True
else:
try:
lowerCAmelCase__ = iter(snake_case__ )
lowerCAmelCase__ = True
except TypeError:
lowerCAmelCase__ = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(snake_case__ ):
if (
not isinstance(snake_case__ , (list, tuple) )
or not len(snake_case__ ) == 2
or not isinstance(element[0] , snake_case__ )
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
lowerCAmelCase__ = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
F"""Cannot set key/value for {element}. It needs to be a tuple (key, value).""" )
break
setattr(self , element[0] , element[1] )
if element[1] is not None:
lowerCAmelCase__ = element[1]
elif first_field is not None:
lowerCAmelCase__ = first_field
else:
for field in class_fields:
lowerCAmelCase__ = getattr(self , field.name )
if v is not None:
lowerCAmelCase__ = v
def __delitem__( self : Optional[int] , *snake_case__ : Union[str, Any] , **snake_case__ : Union[str, Any] ):
raise Exception(F"""You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.""" )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , *snake_case__ : Tuple , **snake_case__ : List[Any] ):
raise Exception(F"""You cannot use ``setdefault`` on a {self.__class__.__name__} instance.""" )
def _SCREAMING_SNAKE_CASE ( self : Tuple , *snake_case__ : List[Any] , **snake_case__ : Optional[int] ):
raise Exception(F"""You cannot use ``pop`` on a {self.__class__.__name__} instance.""" )
def _SCREAMING_SNAKE_CASE ( self : str , *snake_case__ : Optional[int] , **snake_case__ : Tuple ):
raise Exception(F"""You cannot use ``update`` on a {self.__class__.__name__} instance.""" )
def __getitem__( self : int , snake_case__ : Optional[Any] ):
if isinstance(snake_case__ , snake_case__ ):
lowerCAmelCase__ = dict(self.items() )
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__( self : Optional[Any] , snake_case__ : str , snake_case__ : Union[str, Any] ):
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(snake_case__ , snake_case__ )
super().__setattr__(snake_case__ , snake_case__ )
def __setitem__( self : Union[str, Any] , snake_case__ : str , snake_case__ : Tuple ):
# Will raise a KeyException if needed
super().__setitem__(snake_case__ , snake_case__ )
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : int ):
return tuple(self[k] for k in self.keys() )
class a_ ( __UpperCamelCase , __UpperCamelCase ):
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[Any] , snake_case__ : Optional[int] ):
raise ValueError(
F"""{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}""" )
class a_ ( __UpperCamelCase ):
UpperCamelCase_ : Tuple = 'longest'
UpperCamelCase_ : Dict = 'max_length'
UpperCamelCase_ : Optional[int] = 'do_not_pad'
class a_ ( __UpperCamelCase ):
UpperCamelCase_ : Any = 'pt'
UpperCamelCase_ : Union[str, Any] = 'tf'
UpperCamelCase_ : int = 'np'
UpperCamelCase_ : List[str] = 'jax'
class a_ :
def __init__( self : int , snake_case__ : Dict ):
lowerCAmelCase__ = context_managers
lowerCAmelCase__ = ExitStack()
def __enter__( self : Any ):
for context_manager in self.context_managers:
self.stack.enter_context(snake_case__ )
def __exit__( self : Tuple , *snake_case__ : List[Any] , **snake_case__ : Optional[Any] ):
self.stack.__exit__(*snake_case__ , **snake_case__ )
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = infer_framework(lowerCAmelCase__ )
if framework == "tf":
lowerCAmelCase__ = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
lowerCAmelCase__ = inspect.signature(model_class.forward ) # PyTorch models
else:
lowerCAmelCase__ = inspect.signature(model_class.__call__ ) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = model_class.__name__
lowerCAmelCase__ = infer_framework(lowerCAmelCase__ )
if framework == "tf":
lowerCAmelCase__ = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
lowerCAmelCase__ = inspect.signature(model_class.forward ) # PyTorch models
else:
lowerCAmelCase__ = inspect.signature(model_class.__call__ ) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ = "" , lowerCamelCase__ = "." ):
"""simple docstring"""
def _flatten_dict(lowerCamelCase__ , lowerCamelCase__="" , lowerCamelCase__="." ):
for k, v in d.items():
lowerCAmelCase__ = str(lowerCAmelCase__ ) + delimiter + str(lowerCAmelCase__ ) if parent_key else k
if v and isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
yield from flatten_dict(lowerCAmelCase__ , lowerCAmelCase__ , delimiter=lowerCAmelCase__ ).items()
else:
yield key, v
return dict(_flatten_dict(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) )
@contextmanager
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ = False ):
"""simple docstring"""
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__=None ):
"""simple docstring"""
if is_numpy_array(lowerCAmelCase__ ):
return np.transpose(lowerCAmelCase__ , axes=lowerCAmelCase__ )
elif is_torch_tensor(lowerCAmelCase__ ):
return array.T if axes is None else array.permute(*lowerCAmelCase__ )
elif is_tf_tensor(lowerCAmelCase__ ):
import tensorflow as tf
return tf.transpose(lowerCAmelCase__ , perm=lowerCAmelCase__ )
elif is_jax_tensor(lowerCAmelCase__ ):
return jnp.transpose(lowerCAmelCase__ , axes=lowerCAmelCase__ )
else:
raise ValueError(f"""Type not supported for transpose: {type(lowerCAmelCase__ )}.""" )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
if is_numpy_array(lowerCAmelCase__ ):
return np.reshape(lowerCAmelCase__ , lowerCAmelCase__ )
elif is_torch_tensor(lowerCAmelCase__ ):
return array.reshape(*lowerCAmelCase__ )
elif is_tf_tensor(lowerCAmelCase__ ):
import tensorflow as tf
return tf.reshape(lowerCAmelCase__ , lowerCAmelCase__ )
elif is_jax_tensor(lowerCAmelCase__ ):
return jnp.reshape(lowerCAmelCase__ , lowerCAmelCase__ )
else:
raise ValueError(f"""Type not supported for reshape: {type(lowerCAmelCase__ )}.""" )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__=None ):
"""simple docstring"""
if is_numpy_array(lowerCAmelCase__ ):
return np.squeeze(lowerCAmelCase__ , axis=lowerCAmelCase__ )
elif is_torch_tensor(lowerCAmelCase__ ):
return array.squeeze() if axis is None else array.squeeze(dim=lowerCAmelCase__ )
elif is_tf_tensor(lowerCAmelCase__ ):
import tensorflow as tf
return tf.squeeze(lowerCAmelCase__ , axis=lowerCAmelCase__ )
elif is_jax_tensor(lowerCAmelCase__ ):
return jnp.squeeze(lowerCAmelCase__ , axis=lowerCAmelCase__ )
else:
raise ValueError(f"""Type not supported for squeeze: {type(lowerCAmelCase__ )}.""" )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
if is_numpy_array(lowerCAmelCase__ ):
return np.expand_dims(lowerCAmelCase__ , lowerCAmelCase__ )
elif is_torch_tensor(lowerCAmelCase__ ):
return array.unsqueeze(dim=lowerCAmelCase__ )
elif is_tf_tensor(lowerCAmelCase__ ):
import tensorflow as tf
return tf.expand_dims(lowerCAmelCase__ , axis=lowerCAmelCase__ )
elif is_jax_tensor(lowerCAmelCase__ ):
return jnp.expand_dims(lowerCAmelCase__ , axis=lowerCAmelCase__ )
else:
raise ValueError(f"""Type not supported for expand_dims: {type(lowerCAmelCase__ )}.""" )
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
if is_numpy_array(lowerCAmelCase__ ):
return np.size(lowerCAmelCase__ )
elif is_torch_tensor(lowerCAmelCase__ ):
return array.numel()
elif is_tf_tensor(lowerCAmelCase__ ):
import tensorflow as tf
return tf.size(lowerCAmelCase__ )
elif is_jax_tensor(lowerCAmelCase__ ):
return array.size
else:
raise ValueError(f"""Type not supported for expand_dims: {type(lowerCAmelCase__ )}.""" )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
for key, value in auto_map.items():
if isinstance(lowerCAmelCase__ , (tuple, list) ):
lowerCAmelCase__ = [f"""{repo_id}--{v}""" if (v is not None and """--""" not in v) else v for v in value]
elif value is not None and "--" not in value:
lowerCAmelCase__ = f"""{repo_id}--{value}"""
return auto_map
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
for base_class in inspect.getmro(lowerCAmelCase__ ):
lowerCAmelCase__ = base_class.__module__
lowerCAmelCase__ = base_class.__name__
if module.startswith("""tensorflow""" ) or module.startswith("""keras""" ) or name == "TFPreTrainedModel":
return "tf"
elif module.startswith("""torch""" ) or name == "PreTrainedModel":
return "pt"
elif module.startswith("""flax""" ) or module.startswith("""jax""" ) or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(f"""Could not infer framework from class {model_class}.""" )
| 644 |
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def lowercase ( lowerCAmelCase__ ):
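# Decorator that returns the wall-clock time (measured with timeit.default_timer) taken by the wrapped call.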
def wrapper(*lowerCAmelCase__ ,**lowerCAmelCase__ ):
lowerCamelCase_ = timeit.default_timer()
lowerCamelCase_ = func(*lowerCAmelCase__ ,**lowerCAmelCase__ )
lowerCamelCase_ = timeit.default_timer() - starttime
return delta
lowerCamelCase_ = func.__name__
return wrapper
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=100 ,lowerCAmelCase__=None ):
lowerCamelCase_ = []
lowerCamelCase_ = seq_shapes or {}
for i in range(lowerCAmelCase__ ):
lowerCamelCase_ = {}
for col_id, (k, v) in enumerate(features.items() ):
if isinstance(lowerCAmelCase__ ,_ArrayXD ):
lowerCamelCase_ = np.random.rand(*v.shape ).astype(v.dtype )
elif isinstance(lowerCAmelCase__ ,datasets.Value ):
if v.dtype == "string":
lowerCamelCase_ = '''The small grey turtle was surprisingly fast when challenged.'''
else:
lowerCamelCase_ = np.random.randint(10 ,size=1 ).astype(v.dtype ).item()
elif isinstance(lowerCAmelCase__ ,datasets.Sequence ):
while isinstance(lowerCAmelCase__ ,datasets.Sequence ):
lowerCamelCase_ = v.feature
lowerCamelCase_ = seq_shapes[k]
lowerCamelCase_ = np.random.rand(*lowerCAmelCase__ ).astype(v.dtype )
lowerCamelCase_ = data
dummy_data.append((i, example) )
return dummy_data
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=100 ,lowerCAmelCase__=None ):
lowerCamelCase_ = generate_examples(lowerCAmelCase__ ,num_examples=lowerCAmelCase__ ,seq_shapes=lowerCAmelCase__ )
with ArrowWriter(features=lowerCAmelCase__ ,path=lowerCAmelCase__ ) as writer:
for key, record in dummy_data:
lowerCamelCase_ = features.encode_example(lowerCAmelCase__ )
writer.write(lowerCAmelCase__ )
lowerCamelCase_ , lowerCamelCase_ = writer.finalize()
if not num_final_examples == num_examples:
raise ValueError(
f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}." )
lowerCamelCase_ = datasets.Dataset.from_file(filename=lowerCAmelCase__ ,info=datasets.DatasetInfo(features=lowerCAmelCase__ ) )
return dataset
| 29 | 0 |
"""simple docstring"""
from math import log
from scipy.constants import Boltzmann, physical_constants
_UpperCamelCase = 3_0_0 # TEMPERATURE (unit = K)
def SCREAMING_SNAKE_CASE ( donor_conc , acceptor_conc , intrinsic_conc , ) -> float:
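# Built-in potential of a p-n junction: V_bi = (k_B * T / q) * ln(N_d * N_a / n_i^2);
# dividing by the electron-volt constant converts the thermal energy k_B*T from joules to volts.
# Rough worked example: N_d = N_a = 1e17 cm^-3 and n_i = 1.5e10 cm^-3 at 300 K give V_bi of roughly 0.81 V.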
if donor_conc <= 0:
raise ValueError("Donor concentration should be positive" )
elif acceptor_conc <= 0:
raise ValueError("Acceptor concentration should be positive" )
elif intrinsic_conc <= 0:
raise ValueError("Intrinsic concentration should be positive" )
elif donor_conc <= intrinsic_conc:
raise ValueError(
"Donor concentration should be greater than intrinsic concentration" )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
"Acceptor concentration should be greater than intrinsic concentration" )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 453 |
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
A_ = [
"""good first issue""",
"""feature request""",
"""wip""",
]
def lowercase ( ):
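# Sweep open issues on huggingface/accelerate: close issues the stale bot already pinged more than
# a week ago, and leave a stale comment on issues inactive for over 23 days (and at least 30 days old),
# skipping anything that carries an exempt label.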
lowerCamelCase_ = Github(os.environ['''GITHUB_TOKEN'''] )
lowerCamelCase_ = g.get_repo('''huggingface/accelerate''' )
lowerCamelCase_ = repo.get_issues(state='''open''' )
for issue in open_issues:
lowerCamelCase_ = sorted([comment for comment in issue.get_comments()] ,key=lambda lowerCAmelCase__ : i.created_at ,reverse=lowerCAmelCase__ )
lowerCamelCase_ = comments[0] if len(lowerCAmelCase__ ) > 0 else None
lowerCamelCase_ = dt.utcnow()
lowerCamelCase_ = (current_time - issue.updated_at).days
lowerCamelCase_ = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state='''closed''' )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main()
| 29 | 0 |
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
a = logging.get_logger(__name__)
def _SCREAMING_SNAKE_CASE ( snake_case , snake_case ) -> int:
_UpperCAmelCase = RobertaPreLayerNormConfig.from_pretrained(
lowerCAmelCase__ , architectures=["""RobertaPreLayerNormForMaskedLM"""] )
# convert state_dict
_UpperCAmelCase = torch.load(hf_hub_download(repo_id=lowerCAmelCase__ , filename="""pytorch_model.bin""" ) )
_UpperCAmelCase = {}
for tensor_key, tensor_value in original_state_dict.items():
# The transformers implementation gives the model a unique name, rather than overwriting 'roberta'
if tensor_key.startswith("""roberta.""" ):
_UpperCAmelCase = """roberta_prelayernorm.""" + tensor_key[len("""roberta.""" ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith(""".self.LayerNorm.weight""" ) or tensor_key.endswith(""".self.LayerNorm.bias""" ):
continue
_UpperCAmelCase = tensor_value
_UpperCAmelCase = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=lowerCAmelCase__ , config=lowerCAmelCase__ , state_dict=lowerCAmelCase__ )
model.save_pretrained(lowerCAmelCase__ )
# convert tokenizer
_UpperCAmelCase = AutoTokenizer.from_pretrained(lowerCAmelCase__ )
tokenizer.save_pretrained(lowerCAmelCase__ )
if __name__ == "__main__":
a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint-repo",
default=None,
type=str,
required=True,
help="Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
a = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path) | 518 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''_float_tensor''',
'''decoder.output_projection.weight''',
]
for k in ignore_keys:
state_dict.pop(lowerCAmelCase__ ,lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ ):
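# Build a linear projection whose weights are copied from the embedding table
# (used as the LM head of the converted model).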
lowerCamelCase_ , lowerCamelCase_ = emb.weight.shape
lowerCamelCase_ = nn.Linear(lowerCAmelCase__ ,lowerCAmelCase__ ,bias=lowerCAmelCase__ )
lowerCamelCase_ = emb.weight.data
return lin_layer
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__="facebook/mbart-large-en-ro" ,lowerCAmelCase__=False ,lowerCAmelCase__=False ):
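# Load the fairseq mBART checkpoint, drop keys the HF architecture does not use,
# and copy the remaining weights into an MBartForConditionalGeneration model.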
lowerCamelCase_ = torch.load(lowerCAmelCase__ ,map_location='''cpu''' )['''model''']
remove_ignore_keys_(lowerCAmelCase__ )
lowerCamelCase_ = state_dict['''encoder.embed_tokens.weight'''].shape[0]
lowerCamelCase_ = MBartConfig.from_pretrained(lowerCAmelCase__ ,vocab_size=lowerCAmelCase__ )
if mbart_aa and finetuned:
lowerCamelCase_ = '''relu'''
lowerCamelCase_ = state_dict['''decoder.embed_tokens.weight''']
lowerCamelCase_ = MBartForConditionalGeneration(lowerCAmelCase__ )
model.model.load_state_dict(lowerCAmelCase__ )
if finetuned:
lowerCamelCase_ = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""fairseq_path""", type=str, help="""bart.large, bart.large.cnn or a path to a model.pt on local filesystem."""
)
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--hf_config""",
default="""facebook/mbart-large-cc25""",
type=str,
help="""Which huggingface architecture to use: mbart-large""",
)
parser.add_argument("""--mbart_50""", action="""store_true""", help="""whether the model is mMART-50 checkpoint""")
parser.add_argument("""--finetuned""", action="""store_true""", help="""whether the model is a fine-tuned checkpoint""")
A_ = parser.parse_args()
A_ = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
| 29 | 0 |
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def __lowerCAmelCase ( __magic_name__ ):
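# Entropy of the softmax distribution over the logits x (used by DeeBERT to decide early exits):
# H = log(sum_i exp(x_i)) - sum_i x_i * exp(x_i) / sum_i exp(x_i)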
exp_x = torch.exp(__magic_name__ )
A = torch.sum(exp_x , dim=1 ) # sum of exp(x_i)
B = torch.sum(__magic_name__ * exp_x , dim=1 ) # sum of x_i * exp(x_i)
return torch.log(A ) - B / A
class A ( nn.Module ):
'''simple docstring'''
def __init__( self : List[Any] , _UpperCamelCase : Tuple):
super().__init__()
_lowercase: int = config.output_attentions
_lowercase: List[str] = config.output_hidden_states
_lowercase: List[Any] = nn.ModuleList([BertLayer(_UpperCamelCase) for _ in range(config.num_hidden_layers)])
_lowercase: List[Any] = nn.ModuleList([BertHighway(_UpperCamelCase) for _ in range(config.num_hidden_layers)])
_lowercase: Optional[Any] = [-1 for _ in range(config.num_hidden_layers)]
def UpperCAmelCase__ ( self : Optional[int] , _UpperCamelCase : int):
if (type(_UpperCamelCase) is float) or (type(_UpperCamelCase) is int):
for i in range(len(self.early_exit_entropy)):
_lowercase: Optional[int] = x
else:
_lowercase: List[str] = x
def UpperCAmelCase__ ( self : List[Any] , _UpperCamelCase : Any):
_lowercase: Any = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name])
def UpperCAmelCase__ ( self : List[str] , _UpperCamelCase : int , _UpperCamelCase : Optional[int]=None , _UpperCamelCase : List[Any]=None , _UpperCamelCase : Dict=None , _UpperCamelCase : Optional[Any]=None , ):
_lowercase: Optional[int] = ()
_lowercase: List[Any] = ()
_lowercase: Dict = ()
for i, layer_module in enumerate(self.layer):
if self.output_hidden_states:
_lowercase: List[str] = all_hidden_states + (hidden_states,)
_lowercase: Dict = layer_module(
_UpperCamelCase , _UpperCamelCase , head_mask[i] , _UpperCamelCase , _UpperCamelCase)
_lowercase: Dict = layer_outputs[0]
if self.output_attentions:
_lowercase: int = all_attentions + (layer_outputs[1],)
_lowercase: List[str] = (hidden_states,)
if self.output_hidden_states:
_lowercase: Union[str, Any] = current_outputs + (all_hidden_states,)
if self.output_attentions:
_lowercase: int = current_outputs + (all_attentions,)
_lowercase: str = self.highway[i](_UpperCamelCase)
# logits, pooled_output
if not self.training:
_lowercase: Tuple = highway_exit[0]
_lowercase: str = entropy(_UpperCamelCase)
_lowercase: Optional[int] = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
_lowercase: Tuple = all_highway_exits + (highway_exit,)
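# Early exit: at inference time, once a highway head's prediction entropy drops below its
# per-layer threshold, raise HighwayException to stop running the remaining layers.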
if highway_entropy < self.early_exit_entropy[i]:
_lowercase: Union[str, Any] = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(_UpperCamelCase , i + 1)
else:
_lowercase: Optional[int] = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
_lowercase: Tuple = all_hidden_states + (hidden_states,)
_lowercase: str = (hidden_states,)
if self.output_hidden_states:
_lowercase: Optional[int] = outputs + (all_hidden_states,)
if self.output_attentions:
_lowercase: Tuple = outputs + (all_attentions,)
_lowercase: Tuple = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
"""The Bert Model transformer with early exiting (DeeBERT). """ , lowerCamelCase_ , )
class A ( lowerCamelCase_ ):
'''simple docstring'''
def __init__( self : Optional[int] , _UpperCamelCase : Any):
super().__init__(_UpperCamelCase)
_lowercase: Any = config
_lowercase: List[str] = BertEmbeddings(_UpperCamelCase)
_lowercase: List[str] = DeeBertEncoder(_UpperCamelCase)
_lowercase: Dict = BertPooler(_UpperCamelCase)
self.init_weights()
def UpperCAmelCase__ ( self : Optional[int]):
self.encoder.init_highway_pooler(self.pooler)
def UpperCAmelCase__ ( self : Dict):
return self.embeddings.word_embeddings
def UpperCAmelCase__ ( self : Dict , _UpperCamelCase : Dict):
_lowercase: Optional[int] = value
def UpperCAmelCase__ ( self : int , _UpperCamelCase : List[str]):
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(_UpperCamelCase)
@add_start_docstrings_to_model_forward(_UpperCamelCase)
def UpperCAmelCase__ ( self : Dict , _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : Optional[int]=None , _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : int=None , _UpperCamelCase : int=None , _UpperCamelCase : str=None , ):
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
_lowercase: Optional[Any] = input_ids.size()
elif inputs_embeds is not None:
_lowercase: int = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
_lowercase: Optional[Any] = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
_lowercase: Union[str, Any] = torch.ones(_UpperCamelCase , device=_UpperCamelCase)
if encoder_attention_mask is None:
_lowercase: List[str] = torch.ones(_UpperCamelCase , device=_UpperCamelCase)
if token_type_ids is None:
_lowercase: List[str] = torch.zeros(_UpperCamelCase , dtype=torch.long , device=_UpperCamelCase)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
_lowercase: Any = self.get_extended_attention_mask(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
_lowercase: List[str] = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
_lowercase: Optional[Any] = encoder_attention_mask[:, None, None, :]
_lowercase: Union[str, Any] = encoder_extended_attention_mask.to(
dtype=next(self.parameters()).dtype) # fp16 compatibility
_lowercase: Union[str, Any] = (1.0 - encoder_extended_attention_mask) * -10_000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
_lowercase: Dict = self.get_head_mask(_UpperCamelCase , self.config.num_hidden_layers)
_lowercase: List[str] = self.embeddings(
input_ids=_UpperCamelCase , position_ids=_UpperCamelCase , token_type_ids=_UpperCamelCase , inputs_embeds=_UpperCamelCase)
_lowercase: List[Any] = self.encoder(
_UpperCamelCase , attention_mask=_UpperCamelCase , head_mask=_UpperCamelCase , encoder_hidden_states=_UpperCamelCase , encoder_attention_mask=_UpperCamelCase , )
_lowercase: Optional[int] = encoder_outputs[0]
_lowercase: Optional[int] = self.pooler(_UpperCamelCase)
_lowercase: List[str] = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class A ( lowerCamelCase_ ):
'''simple docstring'''
def __init__( self : List[Any] , _UpperCamelCase : int , _UpperCamelCase : List[str]):
_lowercase: int = message
_lowercase: Optional[int] = exit_layer # start from 1!
class A ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] , _UpperCamelCase : Optional[int]):
super().__init__()
_lowercase: Optional[int] = BertPooler(_UpperCamelCase)
_lowercase: Dict = nn.Dropout(config.hidden_dropout_prob)
_lowercase: int = nn.Linear(config.hidden_size , config.num_labels)
def UpperCAmelCase__ ( self : Union[str, Any] , _UpperCamelCase : Any):
# Pooler
_lowercase: str = encoder_outputs[0]
_lowercase: Optional[Any] = self.pooler(_UpperCamelCase)
# "return" pooler_output
# BertModel
_lowercase: List[Any] = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
_lowercase: int = bmodel_output[1]
_lowercase: Tuple = self.dropout(_UpperCamelCase)
_lowercase: Any = self.classifier(_UpperCamelCase)
return logits, pooled_output
@add_start_docstrings(
"""Bert Model (with early exiting - DeeBERT) with a classifier on top,\n also takes care of multi-layer training. """ , lowerCamelCase_ , )
class A ( lowerCamelCase_ ):
'''simple docstring'''
def __init__( self : str , _UpperCamelCase : Dict):
super().__init__(_UpperCamelCase)
_lowercase: Tuple = config.num_labels
_lowercase: Optional[int] = config.num_hidden_layers
_lowercase: int = DeeBertModel(_UpperCamelCase)
_lowercase: Optional[Any] = nn.Dropout(config.hidden_dropout_prob)
_lowercase: str = nn.Linear(config.hidden_size , self.config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(_UpperCamelCase)
def UpperCAmelCase__ ( self : Any , _UpperCamelCase : List[Any]=None , _UpperCamelCase : Dict=None , _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : List[str]=None , _UpperCamelCase : str=None , _UpperCamelCase : Tuple=None , _UpperCamelCase : Tuple=None , _UpperCamelCase : str=-1 , _UpperCamelCase : int=False , ):
_lowercase: List[str] = self.num_layers
try:
_lowercase: Dict = self.bert(
_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , position_ids=_UpperCamelCase , head_mask=_UpperCamelCase , inputs_embeds=_UpperCamelCase , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
_lowercase: Optional[Any] = outputs[1]
_lowercase: Optional[int] = self.dropout(_UpperCamelCase)
_lowercase: Optional[int] = self.classifier(_UpperCamelCase)
_lowercase: Union[str, Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
_lowercase: Optional[int] = e.message
_lowercase: Optional[int] = e.exit_layer
_lowercase: List[Any] = outputs[0]
if not self.training:
_lowercase: int = entropy(_UpperCamelCase)
_lowercase: List[str] = []
_lowercase: int = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
_lowercase: Optional[Any] = MSELoss()
_lowercase: Optional[int] = loss_fct(logits.view(-1) , labels.view(-1))
else:
_lowercase: List[str] = CrossEntropyLoss()
_lowercase: int = loss_fct(logits.view(-1 , self.num_labels) , labels.view(-1))
# work with highway exits
_lowercase: Dict = []
for highway_exit in outputs[-1]:
_lowercase: Any = highway_exit[0]
if not self.training:
highway_logits_all.append(_UpperCamelCase)
highway_entropy.append(highway_exit[2])
if self.num_labels == 1:
# We are doing regression
_lowercase: Optional[Any] = MSELoss()
_lowercase: Dict = loss_fct(highway_logits.view(-1) , labels.view(-1))
else:
_lowercase: Dict = CrossEntropyLoss()
_lowercase: List[str] = loss_fct(highway_logits.view(-1 , self.num_labels) , labels.view(-1))
highway_losses.append(_UpperCamelCase)
if train_highway:
_lowercase: Any = (sum(highway_losses[:-1]),) + outputs
# exclude the final highway, of course
else:
_lowercase: str = (loss,) + outputs
if not self.training:
_lowercase: List[str] = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
_lowercase: Tuple = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 226 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A_ = {
"""configuration_nllb_moe""": [
"""NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""NllbMoeConfig""",
]
}
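# Model classes are only registered when torch is importable; otherwise the lazy module
# exposes just the configuration objects.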
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"""NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NllbMoeForConditionalGeneration""",
"""NllbMoeModel""",
"""NllbMoePreTrainedModel""",
"""NllbMoeTop2Router""",
"""NllbMoeSparseMLP""",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
NllbMoeTop2Router,
)
else:
import sys
A_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 29 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCAmelCase : List[Any] = {
"configuration_whisper": ["WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP", "WhisperConfig", "WhisperOnnxConfig"],
"feature_extraction_whisper": ["WhisperFeatureExtractor"],
"processing_whisper": ["WhisperProcessor"],
"tokenization_whisper": ["WhisperTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : int = ["WhisperTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : List[Any] = [
"WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
"WhisperForConditionalGeneration",
"WhisperModel",
"WhisperPreTrainedModel",
"WhisperForAudioClassification",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Tuple = [
"TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFWhisperForConditionalGeneration",
"TFWhisperModel",
"TFWhisperPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Optional[int] = [
"FlaxWhisperForConditionalGeneration",
"FlaxWhisperModel",
"FlaxWhisperPreTrainedModel",
"FlaxWhisperForAudioClassification",
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
_UpperCAmelCase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 668 |
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class __lowerCamelCase ( lowerCAmelCase ):
a__: Any = (DDPMScheduler,)
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
lowerCamelCase_ = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**UpperCAmelCase )
return config
def UpperCAmelCase__ ( self ):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=UpperCAmelCase , beta_end=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
self.check_over_configs(thresholding=UpperCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=UpperCAmelCase , prediction_type=UpperCAmelCase , sample_max_value=UpperCAmelCase , )
def UpperCAmelCase__ ( self ):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for t in [0, 500, 999]:
self.check_over_forward(time_step=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1e-5
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = len(UpperCAmelCase )
lowerCamelCase_ = self.dummy_model()
lowerCamelCase_ = self.dummy_sample_deter
lowerCamelCase_ = torch.manual_seed(0 )
for t in reversed(range(UpperCAmelCase ) ):
# 1. predict noise residual
lowerCamelCase_ = model(UpperCAmelCase , UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
lowerCamelCase_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , generator=UpperCAmelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCamelCase_ = pred_prev_sample
lowerCamelCase_ = torch.sum(torch.abs(UpperCAmelCase ) )
lowerCamelCase_ = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config(prediction_type='''v_prediction''' )
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = len(UpperCAmelCase )
lowerCamelCase_ = self.dummy_model()
lowerCamelCase_ = self.dummy_sample_deter
lowerCamelCase_ = torch.manual_seed(0 )
for t in reversed(range(UpperCAmelCase ) ):
# 1. predict noise residual
lowerCamelCase_ = model(UpperCAmelCase , UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
lowerCamelCase_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , generator=UpperCAmelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCamelCase_ = pred_prev_sample
lowerCamelCase_ = torch.sum(torch.abs(UpperCAmelCase ) )
lowerCamelCase_ = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=UpperCAmelCase )
lowerCamelCase_ = scheduler.timesteps
for i, timestep in enumerate(UpperCAmelCase ):
if i == len(UpperCAmelCase ) - 1:
lowerCamelCase_ = -1
else:
lowerCamelCase_ = timesteps[i + 1]
lowerCamelCase_ = scheduler.previous_timestep(UpperCAmelCase )
lowerCamelCase_ = prev_t.item()
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = [100, 87, 50, 51, 0]
with self.assertRaises(UpperCAmelCase , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = [100, 87, 50, 1, 0]
lowerCamelCase_ = len(UpperCAmelCase )
with self.assertRaises(UpperCAmelCase , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=UpperCAmelCase , timesteps=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = [scheduler.config.num_train_timesteps]
with self.assertRaises(
UpperCAmelCase , msg=f'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=UpperCAmelCase )
| 29 | 0 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
a : Optional[Any] = {'LayoutLMv2Config', 'LayoutLMv3Config'}
@is_pipeline_test
class a ( unittest.TestCase ):
snake_case_ = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
snake_case_ = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
snake_case_ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
snake_case_ = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
@require_torch
def A_ ( self : Dict ):
snake_case_ = pipeline(
task='''text-classification''' , model='''hf-internal-testing/tiny-random-distilbert''' , framework='''pt''' )
snake_case_ = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(lowercase_ ) , [{'''label''': '''LABEL_0''', '''score''': 0.504}] )
snake_case_ = text_classifier('''This is great !''' , top_k=2 )
self.assertEqual(
nested_simplify(lowercase_ ) , [{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}] )
snake_case_ = text_classifier(['''This is great !''', '''This is bad'''] , top_k=2 )
self.assertEqual(
nested_simplify(lowercase_ ) , [
[{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}],
[{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}],
] , )
snake_case_ = text_classifier('''This is great !''' , top_k=1 )
self.assertEqual(nested_simplify(lowercase_ ) , [{'''label''': '''LABEL_0''', '''score''': 0.504}] )
# Legacy behavior
snake_case_ = text_classifier('''This is great !''' , return_all_scores=lowercase_ )
self.assertEqual(nested_simplify(lowercase_ ) , [{'''label''': '''LABEL_0''', '''score''': 0.504}] )
snake_case_ = text_classifier('''This is great !''' , return_all_scores=lowercase_ )
self.assertEqual(
nested_simplify(lowercase_ ) , [[{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}]] )
snake_case_ = text_classifier(['''This is great !''', '''Something else'''] , return_all_scores=lowercase_ )
self.assertEqual(
nested_simplify(lowercase_ ) , [
[{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}],
[{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}],
] , )
snake_case_ = text_classifier(['''This is great !''', '''Something else'''] , return_all_scores=lowercase_ )
self.assertEqual(
nested_simplify(lowercase_ ) , [
{'''label''': '''LABEL_0''', '''score''': 0.504},
{'''label''': '''LABEL_0''', '''score''': 0.504},
] , )
@require_torch
def A_ ( self : Tuple ):
import torch
snake_case_ = pipeline(
task='''text-classification''' , model='''hf-internal-testing/tiny-random-distilbert''' , framework='''pt''' , device=torch.device('''cpu''' ) , )
snake_case_ = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(lowercase_ ) , [{'''label''': '''LABEL_0''', '''score''': 0.504}] )
@require_tf
def A_ ( self : List[Any] ):
snake_case_ = pipeline(
task='''text-classification''' , model='''hf-internal-testing/tiny-random-distilbert''' , framework='''tf''' )
snake_case_ = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(lowercase_ ) , [{'''label''': '''LABEL_0''', '''score''': 0.504}] )
@slow
@require_torch
def A_ ( self : Tuple ):
snake_case_ = pipeline('''text-classification''' )
snake_case_ = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(lowercase_ ) , [{'''label''': '''POSITIVE''', '''score''': 1.0}] )
snake_case_ = text_classifier('''This is bad !''' )
self.assertEqual(nested_simplify(lowercase_ ) , [{'''label''': '''NEGATIVE''', '''score''': 1.0}] )
snake_case_ = text_classifier('''Birds are a type of animal''' )
self.assertEqual(nested_simplify(lowercase_ ) , [{'''label''': '''POSITIVE''', '''score''': 0.988}] )
@slow
@require_tf
def A_ ( self : str ):
snake_case_ = pipeline('''text-classification''' , framework='''tf''' )
snake_case_ = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(lowercase_ ) , [{'''label''': '''POSITIVE''', '''score''': 1.0}] )
snake_case_ = text_classifier('''This is bad !''' )
self.assertEqual(nested_simplify(lowercase_ ) , [{'''label''': '''NEGATIVE''', '''score''': 1.0}] )
snake_case_ = text_classifier('''Birds are a type of animal''' )
self.assertEqual(nested_simplify(lowercase_ ) , [{'''label''': '''POSITIVE''', '''score''': 0.988}] )
def A_ ( self : str , lowercase_ : Optional[int] , lowercase_ : Union[str, Any] , lowercase_ : List[str] ):
snake_case_ = TextClassificationPipeline(model=lowercase_ , tokenizer=lowercase_ )
return text_classifier, ["HuggingFace is in", "This is another test"]
def A_ ( self : Tuple , lowercase_ : List[Any] , lowercase_ : Any ):
snake_case_ = text_classifier.model
# Small inputs because BartTokenizer tiny has maximum position embeddings = 22
snake_case_ = '''HuggingFace is in'''
snake_case_ = text_classifier(lowercase_ )
self.assertEqual(nested_simplify(lowercase_ ) , [{'''label''': ANY(lowercase_ ), '''score''': ANY(lowercase_ )}] )
self.assertTrue(outputs[0]['''label'''] in model.config.idalabel.values() )
snake_case_ = ['''HuggingFace is in ''', '''Paris is in France''']
snake_case_ = text_classifier(lowercase_ )
self.assertEqual(
nested_simplify(lowercase_ ) , [{'''label''': ANY(lowercase_ ), '''score''': ANY(lowercase_ )}, {'''label''': ANY(lowercase_ ), '''score''': ANY(lowercase_ )}] , )
self.assertTrue(outputs[0]['''label'''] in model.config.idalabel.values() )
self.assertTrue(outputs[1]['''label'''] in model.config.idalabel.values() )
# Forcing to get all results with `top_k=None`
# This is NOT the legacy format
snake_case_ = text_classifier(lowercase_ , top_k=lowercase_ )
snake_case_ = len(model.config.idalabel.values() )
self.assertEqual(
nested_simplify(lowercase_ ) , [[{'''label''': ANY(lowercase_ ), '''score''': ANY(lowercase_ )}] * N, [{'''label''': ANY(lowercase_ ), '''score''': ANY(lowercase_ )}] * N] , )
snake_case_ = {'''text''': '''HuggingFace is in ''', '''text_pair''': '''Paris is in France'''}
snake_case_ = text_classifier(lowercase_ )
self.assertEqual(
nested_simplify(lowercase_ ) , {'''label''': ANY(lowercase_ ), '''score''': ANY(lowercase_ )} , )
self.assertTrue(outputs['''label'''] in model.config.idalabel.values() )
# This might be used a text pair, but tokenizer + pipe interaction
# makes it hard to understand that it's not using the pair properly
# https://github.com/huggingface/transformers/issues/17305
# We disabled this usage instead as it was outputting wrong outputs.
snake_case_ = [['''HuggingFace is in ''', '''Paris is in France''']]
with self.assertRaises(lowercase_ ):
text_classifier(lowercase_ )
# This used to be valid for doing text pairs
# We're keeping it working because of backward compatibility
snake_case_ = text_classifier([[['''HuggingFace is in ''', '''Paris is in France''']]] )
self.assertEqual(
nested_simplify(lowercase_ ) , [{'''label''': ANY(lowercase_ ), '''score''': ANY(lowercase_ )}] , )
self.assertTrue(outputs[0]['''label'''] in model.config.idalabel.values() )
| 640 |
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
A_ = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class __lowerCamelCase ( lowerCAmelCase ):
a__: bool = field(default=lowerCAmelCase , metadata={'help': 'Whether to use SortishSampler or not.'} )
a__: bool = field(
default=lowerCAmelCase , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
a__: Optional[int] = field(
default=lowerCAmelCase , metadata={
'help': (
'The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '
'to the `max_length` value of the model configuration.'
)
} , )
a__: Optional[int] = field(
default=lowerCAmelCase , metadata={
'help': (
'The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '
'to the `num_beams` value of the model configuration.'
)
} , )
a__: Optional[Union[str, Path, GenerationConfig]] = field(
default=lowerCAmelCase , metadata={
'help': 'Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'
} , )
    def UpperCAmelCase__ ( self ):
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v , GenerationConfig ):
                d[k] = v.to_dict()
        return d
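# Illustrative usage sketch (the class and field names below are inferred from the help strings
# above and are not defined in this snippet): the generation-specific arguments default to None and
# fall back to the model config at evaluation time, and to_dict() also flattens a nested
# GenerationConfig.
#
#   args = Seq2SeqTrainingArguments(
#       output_dir="out",
#       predict_with_generate=True,
#       generation_max_length=128,
#       generation_num_beams=4,
#   )
#   args.to_dict()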
| 29 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class _UpperCAmelCase ( snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
snake_case = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
snake_case = (
{
'feature-extraction': TFMobileBertModel,
'fill-mask': TFMobileBertForMaskedLM,
'question-answering': TFMobileBertForQuestionAnswering,
'text-classification': TFMobileBertForSequenceClassification,
'token-classification': TFMobileBertForTokenClassification,
'zero-shot': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
snake_case = False
snake_case = False
def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Any , __UpperCAmelCase : Tuple=False ):
'''simple docstring'''
_A = super()._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
if return_labels:
if model_class in get_values(__UpperCAmelCase ):
                _A = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
return inputs_dict
class _UpperCAmelCase ( snake_case_ ):
"""simple docstring"""
def __init__( self : int , __UpperCAmelCase : Dict , __UpperCAmelCase : Dict=13 , __UpperCAmelCase : int=7 , __UpperCAmelCase : Any=True , __UpperCAmelCase : int=True , __UpperCAmelCase : str=True , __UpperCAmelCase : Any=True , __UpperCAmelCase : Optional[Any]=99 , __UpperCAmelCase : str=32 , __UpperCAmelCase : int=32 , __UpperCAmelCase : List[str]=2 , __UpperCAmelCase : Optional[Any]=4 , __UpperCAmelCase : List[Any]=37 , __UpperCAmelCase : List[str]="gelu" , __UpperCAmelCase : Tuple=0.1 , __UpperCAmelCase : Union[str, Any]=0.1 , __UpperCAmelCase : Dict=512 , __UpperCAmelCase : Optional[Any]=16 , __UpperCAmelCase : Union[str, Any]=2 , __UpperCAmelCase : Optional[int]=0.02 , __UpperCAmelCase : int=3 , __UpperCAmelCase : Any=4 , __UpperCAmelCase : Union[str, Any]=None , ):
'''simple docstring'''
_A = parent
_A = batch_size
_A = seq_length
_A = is_training
_A = use_input_mask
_A = use_token_type_ids
_A = use_labels
_A = vocab_size
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_act
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = max_position_embeddings
_A = type_vocab_size
_A = type_sequence_label_size
_A = initializer_range
_A = num_labels
_A = num_choices
_A = scope
_A = embedding_size
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
_A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_A = None
if self.use_input_mask:
_A = random_attention_mask([self.batch_size, self.seq_length] )
_A = None
if self.use_token_type_ids:
_A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_A = None
_A = None
_A = None
if self.use_labels:
_A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_A = ids_tensor([self.batch_size] , self.num_choices )
_A = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase ( self : int , __UpperCAmelCase : str , __UpperCAmelCase : str , __UpperCAmelCase : List[Any] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : str , __UpperCAmelCase : List[Any] ):
'''simple docstring'''
_A = TFMobileBertModel(config=__UpperCAmelCase )
_A = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_A = model(__UpperCAmelCase )
_A = [input_ids, input_mask]
_A = model(__UpperCAmelCase )
_A = model(__UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCAmelCase ( self : Any , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : str , __UpperCAmelCase : str , __UpperCAmelCase : List[str] , __UpperCAmelCase : int , __UpperCAmelCase : List[str] , __UpperCAmelCase : List[str] ):
'''simple docstring'''
_A = TFMobileBertForMaskedLM(config=__UpperCAmelCase )
_A = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_A = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : int , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : str , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[Any] ):
'''simple docstring'''
_A = TFMobileBertForNextSentencePrediction(config=__UpperCAmelCase )
_A = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_A = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : int , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Tuple ):
'''simple docstring'''
_A = TFMobileBertForPreTraining(config=__UpperCAmelCase )
_A = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_A = model(__UpperCAmelCase )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def lowerCAmelCase ( self : int , __UpperCAmelCase : List[str] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Any , __UpperCAmelCase : int , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : str ):
'''simple docstring'''
_A = self.num_labels
_A = TFMobileBertForSequenceClassification(config=__UpperCAmelCase )
_A = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_A = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : str ):
'''simple docstring'''
_A = self.num_choices
_A = TFMobileBertForMultipleChoice(config=__UpperCAmelCase )
_A = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
_A = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
_A = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
_A = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
_A = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Any , __UpperCAmelCase : List[str] , __UpperCAmelCase : Dict ):
'''simple docstring'''
_A = self.num_labels
_A = TFMobileBertForTokenClassification(config=__UpperCAmelCase )
_A = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_A = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase ( self : Tuple , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Any , __UpperCAmelCase : Tuple , __UpperCAmelCase : Dict ):
'''simple docstring'''
_A = TFMobileBertForQuestionAnswering(config=__UpperCAmelCase )
_A = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_A = model(__UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
_A = TFMobileBertModelTest.TFMobileBertModelTester(self )
_A = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 )
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*__UpperCAmelCase )
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*__UpperCAmelCase )
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__UpperCAmelCase )
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__UpperCAmelCase )
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*__UpperCAmelCase )
def lowerCAmelCase ( self : str ):
'''simple docstring'''
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*__UpperCAmelCase )
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__UpperCAmelCase )
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*__UpperCAmelCase )
@slow
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
for model_name in ["google/mobilebert-uncased"]:
_A = TFMobileBertModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
_A = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased" )
_A = tf.constant([[0, 1, 2, 3, 4, 5]] )
_A = model(__UpperCAmelCase )[0]
_A = [1, 6, 30522]
self.assertEqual(output.shape , __UpperCAmelCase )
_A = tf.constant(
[
[
[-4.5919547, -9.248295, -9.645256],
[-6.7306175, -6.440284, -6.6052837],
[-7.2743506, -6.7847915, -6.024673],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 )
| 330 |
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256 as shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2 as cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
A_ = True
except ImportError:
A_ = False
try:
from torch.hub import _get_torch_home
A_ = _get_torch_home()
except ImportError:
A_ = os.path.expanduser(
os.getenv("""TORCH_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """torch"""))
)
A_ = os.path.join(torch_cache_home, """transformers""")
A_ = """https://cdn.huggingface.co"""
A_ = """https://s3.amazonaws.com/models.huggingface.co/bert"""
A_ = """/""".join(str(Path(__file__).resolve()).split("""/""")[:-1])
A_ = os.path.join(PATH, """config.yaml""")
A_ = os.path.join(PATH, """attributes.txt""")
A_ = os.path.join(PATH, """objects.txt""")
A_ = os.getenv("""PYTORCH_PRETRAINED_BERT_CACHE""", default_cache_path)
A_ = os.getenv("""PYTORCH_TRANSFORMERS_CACHE""", PYTORCH_PRETRAINED_BERT_CACHE)
A_ = os.getenv("""TRANSFORMERS_CACHE""", PYTORCH_TRANSFORMERS_CACHE)
A_ = """pytorch_model.bin"""
A_ = """config.yaml"""
def lowercase ( lowerCAmelCase__=OBJECTS ,lowerCAmelCase__=ATTRIBUTES ):
lowerCamelCase_ = []
with open(lowerCAmelCase__ ) as f:
for object in f.readlines():
vg_classes.append(object.split(''',''' )[0].lower().strip() )
lowerCamelCase_ = []
with open(lowerCAmelCase__ ) as f:
for object in f.readlines():
vg_attrs.append(object.split(''',''' )[0].lower().strip() )
return vg_classes, vg_attrs
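# Illustrative input line for the loader above (format inferred from the parsing code): each line of
# objects.txt / attributes.txt is a comma-separated list of synonyms, and only the first entry is
# kept, e.g. "traffic light,traffic signal" -> "traffic light".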
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = OrderedDict()
with open(lowerCAmelCase__ ,'''rb''' ) as f:
lowerCamelCase_ = pkl.load(lowerCAmelCase__ )['''model''']
for k in copy.deepcopy(list(ckp.keys() ) ):
lowerCamelCase_ = ckp.pop(lowerCAmelCase__ )
if isinstance(lowerCAmelCase__ ,np.ndarray ):
lowerCamelCase_ = torch.tensor(lowerCAmelCase__ )
else:
            assert isinstance(lowerCAmelCase__ ,torch.Tensor ), type(lowerCAmelCase__ )
lowerCamelCase_ = v
return r
class __lowerCamelCase :
a__: Union[str, Any] = {}
def __init__( self , UpperCAmelCase , UpperCAmelCase = "root" , UpperCAmelCase=0 ):
lowerCamelCase_ = name
lowerCamelCase_ = level
lowerCamelCase_ = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
lowerCamelCase_ = copy.deepcopy(UpperCAmelCase )
lowerCamelCase_ = copy.deepcopy(UpperCAmelCase )
if isinstance(UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = Config(UpperCAmelCase , name=UpperCAmelCase , level=level + 1 )
lowerCamelCase_ = v
setattr(self , UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = d
def __repr__( self ):
return str(list((self._pointer.keys()) ) )
def __setattr__( self , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = val
lowerCamelCase_ = val
lowerCamelCase_ = key.split('''.''' )
lowerCamelCase_ = len(UpperCAmelCase ) - 1
lowerCamelCase_ = self._pointer
if len(UpperCAmelCase ) > 1:
for i, l in enumerate(UpperCAmelCase ):
if hasattr(self , UpperCAmelCase ) and isinstance(getattr(self , UpperCAmelCase ) , UpperCAmelCase ):
setattr(getattr(self , UpperCAmelCase ) , '''.'''.join(levels[i:] ) , UpperCAmelCase )
if l == last_level:
lowerCamelCase_ = val
else:
lowerCamelCase_ = pointer[l]
def UpperCAmelCase__ ( self ):
return self._pointer
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase ):
with open(f"{file_name}" , '''w''' ) as stream:
dump(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase ):
with open(f"{file_name}" , '''w''' ) as stream:
json.dump(UpperCAmelCase , UpperCAmelCase )
@staticmethod
def UpperCAmelCase__ ( UpperCAmelCase ):
with open(UpperCAmelCase ) as stream:
lowerCamelCase_ = load(UpperCAmelCase , Loader=UpperCAmelCase )
return data
def __str__( self ):
lowerCamelCase_ = ''' '''
if self._name != "root":
lowerCamelCase_ = f"{t * (self._level-1)}{self._name}:\n"
else:
lowerCamelCase_ = ''''''
lowerCamelCase_ = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(UpperCAmelCase , UpperCAmelCase ):
r += f"{t * (self._level)}{v}\n"
self._level += 1
else:
r += f"{t * (self._level)}{k}: {v} ({type(UpperCAmelCase ).__name__})\n"
lowerCamelCase_ = level
return r[:-1]
@classmethod
def UpperCAmelCase__ ( cls , UpperCAmelCase , **UpperCAmelCase ):
lowerCamelCase_ , lowerCamelCase_ = cls.get_config_dict(UpperCAmelCase , **UpperCAmelCase )
return cls(UpperCAmelCase )
@classmethod
def UpperCAmelCase__ ( cls , UpperCAmelCase , **UpperCAmelCase ):
lowerCamelCase_ = kwargs.pop('''cache_dir''' , UpperCAmelCase )
lowerCamelCase_ = kwargs.pop('''force_download''' , UpperCAmelCase )
lowerCamelCase_ = kwargs.pop('''resume_download''' , UpperCAmelCase )
lowerCamelCase_ = kwargs.pop('''proxies''' , UpperCAmelCase )
lowerCamelCase_ = kwargs.pop('''local_files_only''' , UpperCAmelCase )
if os.path.isdir(UpperCAmelCase ):
lowerCamelCase_ = os.path.join(UpperCAmelCase , UpperCAmelCase )
elif os.path.isfile(UpperCAmelCase ) or is_remote_url(UpperCAmelCase ):
lowerCamelCase_ = pretrained_model_name_or_path
else:
lowerCamelCase_ = hf_bucket_url(UpperCAmelCase , filename=UpperCAmelCase , use_cdn=UpperCAmelCase )
try:
# Load from URL or cache if already cached
lowerCamelCase_ = cached_path(
UpperCAmelCase , cache_dir=UpperCAmelCase , force_download=UpperCAmelCase , proxies=UpperCAmelCase , resume_download=UpperCAmelCase , local_files_only=UpperCAmelCase , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
lowerCamelCase_ = Config.load_yaml(UpperCAmelCase )
except EnvironmentError:
lowerCamelCase_ = '''Can\'t load config for'''
raise EnvironmentError(UpperCAmelCase )
if resolved_config_file == config_file:
print('''loading configuration file from path''' )
else:
print('''loading configuration file cache''' )
return Config.load_yaml(UpperCAmelCase ), kwargs
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = torch.load('''dump.pt''' ,map_location=in_tensor.device )
lowerCamelCase_ = in_tensor.numpy()
lowerCamelCase_ = out_tensor.numpy()[0]
print(na.shape ,na[0, 0, :5] )
print(na.shape ,na[0, 0, :5] )
assert np.allclose(lowerCAmelCase__ ,lowerCAmelCase__ ,rtol=0.01 ,atol=0.1 ), (
f"{sum([1 for x in np.isclose(lowerCAmelCase__ ,lowerCAmelCase__ ,rtol=0.01 ,atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %"
" element-wise mismatch"
)
raise Exception('''tensors are all good''' )
# Hugging face functions below
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = urlparse(lowerCAmelCase__ )
return parsed.scheme in ("http", "https")
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=True ):
lowerCamelCase_ = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
lowerCamelCase_ = '''/''' not in model_id
if legacy_format:
return f"{endpoint}/{model_id}-{filename}"
else:
return f"{endpoint}/{model_id}/{filename}"
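# Illustrative URL shapes produced by the helper above (the model ids are examples only):
#   "bert-base-uncased" + "config.yaml" -> "<endpoint>/bert-base-uncased-config.yaml"   (legacy flat layout)
#   "org/model"         + "config.yaml" -> "<endpoint>/org/model/config.yaml"           (namespaced layout)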
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=None ,lowerCAmelCase__=0 ,lowerCAmelCase__=None ,):
lowerCamelCase_ = '''python/{}'''.format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
ua += "; " + "; ".join('''{}/{}'''.format(lowerCAmelCase__ ,lowerCAmelCase__ ) for k, v in user_agent.items() )
elif isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
ua += "; " + user_agent
lowerCamelCase_ = {'''user-agent''': ua}
if resume_size > 0:
lowerCamelCase_ = '''bytes=%d-''' % (resume_size,)
lowerCamelCase_ = requests.get(lowerCAmelCase__ ,stream=lowerCAmelCase__ ,proxies=lowerCAmelCase__ ,headers=lowerCAmelCase__ )
if response.status_code == 416: # Range not satisfiable
return
lowerCamelCase_ = response.headers.get('''Content-Length''' )
lowerCamelCase_ = resume_size + int(lowerCAmelCase__ ) if content_length is not None else None
lowerCamelCase_ = tqdm(
unit='''B''' ,unit_scale=lowerCAmelCase__ ,total=lowerCAmelCase__ ,initial=lowerCAmelCase__ ,desc='''Downloading''' ,)
for chunk in response.iter_content(chunk_size=1_024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(lowerCAmelCase__ ) )
temp_file.write(lowerCAmelCase__ )
progress.close()
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,lowerCAmelCase__=None ,lowerCAmelCase__=10 ,lowerCAmelCase__=False ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,):
if cache_dir is None:
lowerCamelCase_ = TRANSFORMERS_CACHE
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = str(lowerCAmelCase__ )
os.makedirs(lowerCAmelCase__ ,exist_ok=lowerCAmelCase__ )
lowerCamelCase_ = None
if not local_files_only:
try:
lowerCamelCase_ = requests.head(lowerCAmelCase__ ,allow_redirects=lowerCAmelCase__ ,proxies=lowerCAmelCase__ ,timeout=lowerCAmelCase__ )
if response.status_code == 200:
lowerCamelCase_ = response.headers.get('''ETag''' )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
lowerCamelCase_ = url_to_filename(lowerCAmelCase__ ,lowerCAmelCase__ )
# get cache path to put the file
lowerCamelCase_ = os.path.join(lowerCAmelCase__ ,lowerCAmelCase__ )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(lowerCAmelCase__ ):
return cache_path
else:
lowerCamelCase_ = [
file
for file in fnmatch.filter(os.listdir(lowerCAmelCase__ ) ,filename + '''.*''' )
if not file.endswith('''.json''' ) and not file.endswith('''.lock''' )
]
if len(lowerCAmelCase__ ) > 0:
return os.path.join(lowerCAmelCase__ ,matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
'''Cannot find the requested files in the cached path and outgoing traffic has been'''
''' disabled. To enable model look-ups and downloads online, set \'local_files_only\''''
''' to False.''' )
return None
# From now on, etag is not None.
if os.path.exists(lowerCAmelCase__ ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
lowerCamelCase_ = cache_path + '''.lock'''
with FileLock(lowerCAmelCase__ ):
# If the download just completed while the lock was activated.
if os.path.exists(lowerCAmelCase__ ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
lowerCamelCase_ = cache_path + '''.incomplete'''
@contextmanager
def _resumable_file_manager():
with open(lowerCAmelCase__ ,'''a+b''' ) as f:
yield f
lowerCamelCase_ = _resumable_file_manager
if os.path.exists(lowerCAmelCase__ ):
lowerCamelCase_ = os.stat(lowerCAmelCase__ ).st_size
else:
lowerCamelCase_ = 0
else:
lowerCamelCase_ = partial(tempfile.NamedTemporaryFile ,dir=lowerCAmelCase__ ,delete=lowerCAmelCase__ )
lowerCamelCase_ = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
'''%s not found in cache or force_download set to True, downloading to %s''' ,lowerCAmelCase__ ,temp_file.name ,)
http_get(
lowerCAmelCase__ ,lowerCAmelCase__ ,proxies=lowerCAmelCase__ ,resume_size=lowerCAmelCase__ ,user_agent=lowerCAmelCase__ ,)
os.replace(temp_file.name ,lowerCAmelCase__ )
lowerCamelCase_ = {'''url''': url, '''etag''': etag}
lowerCamelCase_ = cache_path + '''.json'''
with open(lowerCAmelCase__ ,'''w''' ) as meta_file:
json.dump(lowerCAmelCase__ ,lowerCAmelCase__ )
return cache_path
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=None ):
lowerCamelCase_ = url.encode('''utf-8''' )
lowerCamelCase_ = shaaaa(lowerCAmelCase__ )
lowerCamelCase_ = url_hash.hexdigest()
if etag:
lowerCamelCase_ = etag.encode('''utf-8''' )
lowerCamelCase_ = shaaaa(lowerCAmelCase__ )
filename += "." + etag_hash.hexdigest()
if url.endswith('''.h5''' ):
filename += ".h5"
return filename
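# Illustrative cache key produced above: "<sha256(url)>.<sha256(etag)>" (plus a trailing ".h5" for TF
# weight files), so a changed server-side ETag maps to a fresh local filename instead of overwriting
# the previously downloaded copy.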
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,lowerCAmelCase__=False ,lowerCAmelCase__=False ,):
if cache_dir is None:
lowerCamelCase_ = TRANSFORMERS_CACHE
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = str(lowerCAmelCase__ )
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = str(lowerCAmelCase__ )
if is_remote_url(lowerCAmelCase__ ):
# URL, so get it from the cache (downloading if necessary)
lowerCamelCase_ = get_from_cache(
lowerCAmelCase__ ,cache_dir=lowerCAmelCase__ ,force_download=lowerCAmelCase__ ,proxies=lowerCAmelCase__ ,resume_download=lowerCAmelCase__ ,user_agent=lowerCAmelCase__ ,local_files_only=lowerCAmelCase__ ,)
elif os.path.exists(lowerCAmelCase__ ):
# File, and it exists.
lowerCamelCase_ = url_or_filename
elif urlparse(lowerCAmelCase__ ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError('''file {} not found'''.format(lowerCAmelCase__ ) )
else:
# Something unknown
raise ValueError('''unable to parse {} as a URL or as a local path'''.format(lowerCAmelCase__ ) )
if extract_compressed_file:
if not is_zipfile(lowerCAmelCase__ ) and not tarfile.is_tarfile(lowerCAmelCase__ ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
lowerCamelCase_ , lowerCamelCase_ = os.path.split(lowerCAmelCase__ )
lowerCamelCase_ = output_file.replace('''.''' ,'''-''' ) + '''-extracted'''
lowerCamelCase_ = os.path.join(lowerCAmelCase__ ,lowerCAmelCase__ )
if os.path.isdir(lowerCAmelCase__ ) and os.listdir(lowerCAmelCase__ ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
lowerCamelCase_ = output_path + '''.lock'''
with FileLock(lowerCAmelCase__ ):
shutil.rmtree(lowerCAmelCase__ ,ignore_errors=lowerCAmelCase__ )
os.makedirs(lowerCAmelCase__ )
if is_zipfile(lowerCAmelCase__ ):
with ZipFile(lowerCAmelCase__ ,'''r''' ) as zip_file:
zip_file.extractall(lowerCAmelCase__ )
zip_file.close()
elif tarfile.is_tarfile(lowerCAmelCase__ ):
lowerCamelCase_ = tarfile.open(lowerCAmelCase__ )
tar_file.extractall(lowerCAmelCase__ )
tar_file.close()
else:
raise EnvironmentError('''Archive format of {} could not be identified'''.format(lowerCAmelCase__ ) )
return output_path_extracted
return output_path
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__="," ):
assert isinstance(lowerCAmelCase__ ,lowerCAmelCase__ )
if os.path.isfile(lowerCAmelCase__ ):
with open(lowerCAmelCase__ ) as f:
lowerCamelCase_ = eval(f.read() )
else:
lowerCamelCase_ = requests.get(lowerCAmelCase__ )
try:
            lowerCamelCase_ = req.json()
except Exception:
lowerCamelCase_ = req.content.decode()
assert data is not None, "could not connect"
try:
lowerCamelCase_ = eval(lowerCAmelCase__ )
except Exception:
lowerCamelCase_ = data.split('''\n''' )
req.close()
return data
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = requests.get(lowerCAmelCase__ )
lowerCamelCase_ = np.array(Image.open(BytesIO(response.content ) ) )
return img
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = url.split('''/''' )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(lowerCAmelCase__ )
with open(lowerCAmelCase__ ,'''rb''' ) as stream:
lowerCamelCase_ = pkl.load(lowerCAmelCase__ )
lowerCamelCase_ = weights.pop('''model''' )
lowerCamelCase_ = {}
for k, v in model.items():
lowerCamelCase_ = torch.from_numpy(lowerCAmelCase__ )
if "running_var" in k:
lowerCamelCase_ = torch.tensor([0] )
lowerCamelCase_ = k.replace('''running_var''' ,'''num_batches_tracked''' )
lowerCamelCase_ = zero
return new
def lowercase ( ):
    print(f"{os.path.abspath(os.path.join(__file__ ,os.pardir ) )}/demo.ipynb" )
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__="RGB" ):
assert isinstance(lowerCAmelCase__ ,lowerCAmelCase__ )
if os.path.isfile(lowerCAmelCase__ ):
lowerCamelCase_ = cva.imread(lowerCAmelCase__ )
else:
lowerCamelCase_ = get_image_from_url(lowerCAmelCase__ )
assert img is not None, f"could not connect to: {im}"
lowerCamelCase_ = cva.cvtColor(lowerCAmelCase__ ,cva.COLOR_BGR2RGB )
if input_format == "RGB":
lowerCamelCase_ = img[:, :, ::-1]
return img
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=1 ):
return (images[i : i + batch] for i in range(0 ,len(lowerCAmelCase__ ) ,lowerCAmelCase__ ))
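# Illustrative behavior of the chunking generator above: with 5 images and a batch size of 2 it
# lazily yields images[0:2], images[2:4] and images[4:6] (the last chunk being shorter).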
| 29 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_lowercase : Optional[int] = {
"configuration_transfo_xl": ["TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", "TransfoXLConfig"],
"tokenization_transfo_xl": ["TransfoXLCorpus", "TransfoXLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Tuple = [
"TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
"AdaptiveEmbedding",
"TransfoXLForSequenceClassification",
"TransfoXLLMHeadModel",
"TransfoXLModel",
"TransfoXLPreTrainedModel",
"load_tf_weights_in_transfo_xl",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Union[str, Any] = [
"TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFAdaptiveEmbedding",
"TFTransfoXLForSequenceClassification",
"TFTransfoXLLMHeadModel",
"TFTransfoXLMainLayer",
"TFTransfoXLModel",
"TFTransfoXLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
_lowercase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 641 |
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.17.0.dev0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
A_ = logging.getLogger(__name__)
@dataclass
class __lowerCamelCase :
a__: Optional[str] = field(
default='tab_fact' , metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
a__: Optional[str] = field(
default='tab_fact' , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} , )
a__: int = field(
default=1024 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
a__: bool = field(
default=lowerCAmelCase , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
a__: bool = field(
default=lowerCAmelCase , metadata={
'help': (
'Whether to pad all samples to `max_seq_length`. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
)
} , )
a__: Optional[int] = field(
default=lowerCAmelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
a__: Optional[int] = field(
default=lowerCAmelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
a__: Optional[int] = field(
default=lowerCAmelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of prediction examples to this '
'value if set.'
)
} , )
a__: Optional[str] = field(
default=lowerCAmelCase , metadata={'help': 'A csv or a json file containing the training data.'} )
a__: Optional[str] = field(
default=lowerCAmelCase , metadata={'help': 'A csv or a json file containing the validation data.'} )
a__: Optional[str] = field(default=lowerCAmelCase , metadata={'help': 'A csv or a json file containing the test data.'} )
def UpperCAmelCase__ ( self ):
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError('''Need either a GLUE task, a training/validation file or a dataset name.''' )
else:
lowerCamelCase_ = self.train_file.split('''.''' )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
lowerCamelCase_ = self.validation_file.split('''.''' )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class __lowerCamelCase :
a__: str = field(
default=lowerCAmelCase , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
a__: Optional[str] = field(
default=lowerCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
a__: Optional[str] = field(
default=lowerCAmelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
a__: Optional[str] = field(
default=lowerCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
a__: bool = field(
default=lowerCAmelCase , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
a__: str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
a__: bool = field(
default=lowerCAmelCase , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
def lowercase ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowerCamelCase_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' ,datefmt='''%m/%d/%Y %H:%M:%S''' ,handlers=[logging.StreamHandler(sys.stdout )] ,)
lowerCamelCase_ = training_args.get_process_log_level()
logger.setLevel(lowerCAmelCase__ )
datasets.utils.logging.set_verbosity(lowerCAmelCase__ )
transformers.utils.logging.set_verbosity(lowerCAmelCase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f", distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}" )
logger.info(f"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
lowerCamelCase_ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCamelCase_ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
lowerCamelCase_ = load_dataset(
data_args.dataset_name ,data_args.dataset_config_name ,cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
lowerCamelCase_ = {'''train''': data_args.train_file, '''validation''': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
lowerCamelCase_ = data_args.train_file.split('''.''' )[-1]
lowerCamelCase_ = data_args.test_file.split('''.''' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
lowerCamelCase_ = data_args.test_file
else:
raise ValueError('''Need either a GLUE task or a test file for `do_predict`.''' )
for key in data_files.keys():
logger.info(f"load a local file for {key}: {data_files[key]}" )
if data_args.train_file.endswith('''.csv''' ):
# Loading a dataset from local csv files
lowerCamelCase_ = load_dataset('''csv''' ,data_files=lowerCAmelCase__ ,cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
lowerCamelCase_ = load_dataset('''json''' ,data_files=lowerCAmelCase__ ,cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
lowerCamelCase_ = raw_datasets['''train'''].features['''label'''].names
lowerCamelCase_ = len(lowerCAmelCase__ )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCamelCase_ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path ,num_labels=lowerCAmelCase__ ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
# load tapex tokenizer
lowerCamelCase_ = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,use_fast=model_args.use_fast_tokenizer ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,add_prefix_space=lowerCAmelCase__ ,)
lowerCamelCase_ = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path ,from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) ,config=lowerCAmelCase__ ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
# Padding strategy
if data_args.pad_to_max_length:
lowerCamelCase_ = '''max_length'''
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
lowerCamelCase_ = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
lowerCamelCase_ = {'''Refused''': 0, '''Entailed''': 1}
lowerCamelCase_ = {0: '''Refused''', 1: '''Entailed'''}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
lowerCamelCase_ = min(data_args.max_seq_length ,tokenizer.model_max_length )
def preprocess_tabfact_function(lowerCAmelCase__ ):
# Tokenize the texts
def _convert_table_text_to_pandas(lowerCAmelCase__ ):
lowerCamelCase_ = [_table_row.split('''#''' ) for _table_row in _table_text.strip('''\n''' ).split('''\n''' )]
lowerCamelCase_ = pd.DataFrame.from_records(_table_content[1:] ,columns=_table_content[0] )
return _table_pd
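        # Illustrative table_text layout handled above (inferred from the parsing): rows are
        # newline-separated, cells are '#'-separated, and the first row becomes the header, e.g.
        #   "name#age\nalice#30\nbob#25" -> DataFrame(columns=["name", "age"]) with two records.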
lowerCamelCase_ = examples['''statement''']
lowerCamelCase_ = list(map(_convert_table_text_to_pandas ,examples['''table_text'''] ) )
lowerCamelCase_ = tokenizer(lowerCAmelCase__ ,lowerCAmelCase__ ,padding=lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,truncation=lowerCAmelCase__ )
lowerCamelCase_ = examples['''label''']
return result
with training_args.main_process_first(desc='''dataset map pre-processing''' ):
lowerCamelCase_ = raw_datasets.map(
lowerCAmelCase__ ,batched=lowerCAmelCase__ ,load_from_cache_file=not data_args.overwrite_cache ,desc='''Running tokenizer on dataset''' ,)
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
lowerCamelCase_ = raw_datasets['''train''']
if data_args.max_train_samples is not None:
lowerCamelCase_ = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
lowerCamelCase_ = raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
lowerCamelCase_ = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('''--do_predict requires a test dataset''' )
lowerCamelCase_ = raw_datasets['''test''']
if data_args.max_predict_samples is not None:
lowerCamelCase_ = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(lowerCAmelCase__ ) ) ,3 ):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}." )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(lowerCAmelCase__ ):
lowerCamelCase_ = p.predictions[0] if isinstance(p.predictions ,lowerCAmelCase__ ) else p.predictions
lowerCamelCase_ = np.argmax(lowerCAmelCase__ ,axis=1 )
        return {"accuracy": (preds == p.label_ids).astype(np.float32 ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
lowerCamelCase_ = default_data_collator
    elif training_args.fp16:
lowerCamelCase_ = DataCollatorWithPadding(lowerCAmelCase__ ,pad_to_multiple_of=8 )
else:
lowerCamelCase_ = None
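    # Note: with fp16 the dynamic collator pads to a multiple of 8 to keep sequence lengths
    # tensor-core friendly; when inputs were already padded to max_length the simple default
    # collator suffices, and None lets the Trainer fall back to its own dynamic padding collator.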
# Initialize our Trainer
lowerCamelCase_ = Trainer(
model=lowerCAmelCase__ ,args=lowerCAmelCase__ ,train_dataset=train_dataset if training_args.do_train else None ,eval_dataset=eval_dataset if training_args.do_eval else None ,compute_metrics=lowerCAmelCase__ ,tokenizer=lowerCAmelCase__ ,data_collator=lowerCAmelCase__ ,)
# Training
if training_args.do_train:
lowerCamelCase_ = None
if training_args.resume_from_checkpoint is not None:
lowerCamelCase_ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCamelCase_ = last_checkpoint
lowerCamelCase_ = trainer.train(resume_from_checkpoint=lowerCAmelCase__ )
lowerCamelCase_ = train_result.metrics
lowerCamelCase_ = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCAmelCase__ )
)
lowerCamelCase_ = min(lowerCAmelCase__ ,len(lowerCAmelCase__ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('''train''' ,lowerCAmelCase__ )
trainer.save_metrics('''train''' ,lowerCAmelCase__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowerCamelCase_ = trainer.evaluate(eval_dataset=lowerCAmelCase__ )
lowerCamelCase_ = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCAmelCase__ )
lowerCamelCase_ = min(lowerCAmelCase__ ,len(lowerCAmelCase__ ) )
trainer.log_metrics('''eval''' ,lowerCAmelCase__ )
trainer.save_metrics('''eval''' ,lowerCAmelCase__ )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
lowerCamelCase_ = predict_dataset.remove_columns('''label''' )
lowerCamelCase_ = trainer.predict(lowerCAmelCase__ ,metric_key_prefix='''predict''' ).predictions
lowerCamelCase_ = np.argmax(lowerCAmelCase__ ,axis=1 )
lowerCamelCase_ = os.path.join(training_args.output_dir ,'''predict_results_tabfact.txt''' )
if trainer.is_world_process_zero():
with open(lowerCAmelCase__ ,'''w''' ) as writer:
logger.info('''***** Predict Results *****''' )
writer.write('''index\tprediction\n''' )
for index, item in enumerate(lowerCAmelCase__ ):
lowerCamelCase_ = label_list[item]
writer.write(f"{index}\t{item}\n" )
lowerCamelCase_ = {'''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''text-classification'''}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCAmelCase__ )
else:
trainer.create_model_card(**lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 29 | 0 |
"""simple docstring"""
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
lowerCAmelCase_ = logging.getLogger(__name__)
def lowerCamelCase_()-> str:
_SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser(
description="""Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).""" )
parser.add_argument("""--file_path""" , type=lowerCAmelCase__ , default="""data/dump.txt""" , help="""The path to the data.""" )
parser.add_argument("""--tokenizer_type""" , type=lowerCAmelCase__ , default="""bert""" , choices=["""bert""", """roberta""", """gpt2"""] )
parser.add_argument("""--tokenizer_name""" , type=lowerCAmelCase__ , default="""bert-base-uncased""" , help="""The tokenizer to use.""" )
parser.add_argument("""--dump_file""" , type=lowerCAmelCase__ , default="""data/dump""" , help="""The dump file prefix.""" )
_SCREAMING_SNAKE_CASE : Any = parser.parse_args()
logger.info(F"""Loading Tokenizer ({args.tokenizer_name})""" )
if args.tokenizer_type == "bert":
_SCREAMING_SNAKE_CASE : List[Any] = BertTokenizer.from_pretrained(args.tokenizer_name )
_SCREAMING_SNAKE_CASE : int = tokenizer.special_tokens_map["""cls_token"""] # `[CLS]`
_SCREAMING_SNAKE_CASE : str = tokenizer.special_tokens_map["""sep_token"""] # `[SEP]`
elif args.tokenizer_type == "roberta":
_SCREAMING_SNAKE_CASE : int = RobertaTokenizer.from_pretrained(args.tokenizer_name )
_SCREAMING_SNAKE_CASE : List[str] = tokenizer.special_tokens_map["""cls_token"""] # `<s>`
_SCREAMING_SNAKE_CASE : int = tokenizer.special_tokens_map["""sep_token"""] # `</s>`
elif args.tokenizer_type == "gpt2":
        _SCREAMING_SNAKE_CASE : Any = GPT2Tokenizer.from_pretrained(args.tokenizer_name )
_SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.special_tokens_map["""bos_token"""] # `<|endoftext|>`
_SCREAMING_SNAKE_CASE : List[str] = tokenizer.special_tokens_map["""eos_token"""] # `<|endoftext|>`
logger.info(F"""Loading text from {args.file_path}""" )
with open(args.file_path , """r""" , encoding="""utf8""" ) as fp:
_SCREAMING_SNAKE_CASE : List[str] = fp.readlines()
logger.info("""Start encoding""" )
logger.info(F"""{len(lowerCAmelCase__ )} examples to process.""" )
_SCREAMING_SNAKE_CASE : Union[str, Any] = []
_SCREAMING_SNAKE_CASE : List[Any] = 0
_SCREAMING_SNAKE_CASE : Tuple = 10_000
_SCREAMING_SNAKE_CASE : Dict = time.time()
for text in data:
_SCREAMING_SNAKE_CASE : Union[str, Any] = F"""{bos} {text.strip()} {sep}"""
_SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
rslt.append(lowerCAmelCase__ )
iter += 1
if iter % interval == 0:
_SCREAMING_SNAKE_CASE : int = time.time()
logger.info(F"""{iter} examples processed. - {(end-start):.2f}s/{interval}expl""" )
_SCREAMING_SNAKE_CASE : str = time.time()
logger.info("""Finished binarization""" )
logger.info(F"""{len(lowerCAmelCase__ )} examples processed.""" )
_SCREAMING_SNAKE_CASE : Optional[int] = F"""{args.dump_file}.{args.tokenizer_name}.pickle"""
_SCREAMING_SNAKE_CASE : Dict = tokenizer.vocab_size
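    # Token ids are pickled in the smallest dtype that can hold them: unsigned 16-bit integers when
    # the vocabulary fits in 2**16 ids, otherwise a wider signed integer dtype, which roughly halves
    # the size of the dump for sub-65k vocabularies.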
if vocab_size < (1 << 16):
        _SCREAMING_SNAKE_CASE : Any = [np.uint16(d ) for d in rslt]
    else:
        _SCREAMING_SNAKE_CASE : Optional[Any] = [np.int32(d ) for d in rslt]
random.shuffle(rslt_ )
logger.info(F"""Dump to {dp_file}""" )
with open(lowerCAmelCase__ , """wb""" ) as handle:
pickle.dump(rslt_ , lowerCAmelCase__ , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
| 338 |
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class __lowerCamelCase ( unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' )
lowerCamelCase_ = AutoTokenizer.from_pretrained('''google/mt5-small''' )
lowerCamelCase_ = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids
lowerCamelCase_ = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids
lowerCamelCase_ = shift_tokens_right(UpperCAmelCase , model.config.pad_token_id , model.config.decoder_start_token_id )
lowerCamelCase_ = model(UpperCAmelCase , decoder_input_ids=UpperCAmelCase ).logits
lowerCamelCase_ = optax.softmax_cross_entropy(UpperCAmelCase , onehot(UpperCAmelCase , logits.shape[-1] ) ).mean()
lowerCamelCase_ = -(labels.shape[-1] * loss.item())
        lowerCamelCase_ = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 29 | 0 |
"""simple docstring"""
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
__UpperCAmelCase = {
"vocab_file": {
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
},
"merges_file": {
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
},
"tokenizer_file": {
"Salesforce/codegen-350M-mono": (
"https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
),
},
}
__UpperCAmelCase = {
"Salesforce/codegen-350M-mono": 2_048,
}
class lowercase_ ( a_ ):
__magic_name__ : List[str] = VOCAB_FILES_NAMES
__magic_name__ : Any = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ : Union[str, Any] = ['input_ids', 'attention_mask']
__magic_name__ : List[str] = CodeGenTokenizer
def __init__( self : Union[str, Any] , _lowercase : str=None , _lowercase : Tuple=None , _lowercase : Any=None , _lowercase : List[Any]="<|endoftext|>" , _lowercase : int="<|endoftext|>" , _lowercase : Union[str, Any]="<|endoftext|>" , _lowercase : Dict=False , **_lowercase : str , ):
super().__init__(
_lowercase , _lowercase , tokenizer_file=_lowercase , unk_token=_lowercase , bos_token=_lowercase , eos_token=_lowercase , add_prefix_space=_lowercase , **_lowercase , )
if kwargs.pop("add_bos_token" , _lowercase ):
lowerCAmelCase__ : Any = kwargs.pop("name_or_path" , "" )
raise ValueError(
"Currenty GPT2\'s fast tokenizer does NOT support adding a BOS token."
"Instead you should use GPT2\'s slow tokenizer class `CodeGenTokenizer` as follows: \n"
f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
"This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
" so that the fast tokenizer works correctly." )
lowerCAmelCase__ : int = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , _lowercase ) != add_prefix_space:
lowerCAmelCase__ : int = getattr(_lowercase , pre_tok_state.pop("type" ) )
lowerCAmelCase__ : int = add_prefix_space
lowerCAmelCase__ : Optional[Any] = pre_tok_class(**_lowercase )
lowerCAmelCase__ : Union[str, Any] = add_prefix_space
def _lowerCAmelCase ( self : Any , *_lowercase : int , **_lowercase : Optional[Any] ):
lowerCAmelCase__ : Any = kwargs.get("is_split_into_words" , _lowercase )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*_lowercase , **_lowercase )
def _lowerCAmelCase ( self : Union[str, Any] , *_lowercase : Union[str, Any] , **_lowercase : Union[str, Any] ):
lowerCAmelCase__ : Any = kwargs.get("is_split_into_words" , _lowercase )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*_lowercase , **_lowercase )
def _lowerCAmelCase ( self : Dict , _lowercase : Union[str, Any] , _lowercase : Union[str, Any] = None ):
lowerCAmelCase__ : str = self._tokenizer.model.save(_lowercase , name=_lowercase )
return tuple(_lowercase )
def _lowerCAmelCase ( self : Optional[int] , _lowercase : Optional[int] , _lowercase : Optional[int] = False , _lowercase : Union[str, Any] = None , _lowercase : Union[str, Any] = None , **_lowercase : int , ):
lowerCAmelCase__ : Union[str, Any] = super().decode(
token_ids=_lowercase , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase , **_lowercase , )
if truncate_before_pattern is not None and len(_lowercase ) > 0:
lowerCAmelCase__ : Union[str, Any] = self.truncate(_lowercase , _lowercase )
return decoded_text
def _lowerCAmelCase ( self : str , _lowercase : List[Any] , _lowercase : Optional[int] ):
def find_re(_lowercase : Union[str, Any] , _lowercase : Union[str, Any] , _lowercase : List[str] ):
lowerCAmelCase__ : int = pattern.search(_lowercase , _lowercase )
return m.start() if m else -1
lowerCAmelCase__ : Optional[int] = [re.compile(_lowercase , re.MULTILINE ) for pattern in truncate_before_pattern]
lowerCAmelCase__ : Optional[int] = list(re.finditer("^print" , _lowercase , re.MULTILINE ) )
if len(_lowercase ) > 1:
lowerCAmelCase__ : str = completion[: prints[1].start()]
lowerCAmelCase__ : Any = list(re.finditer("^def" , _lowercase , re.MULTILINE ) )
if len(_lowercase ) > 1:
lowerCAmelCase__ : Any = completion[: defs[1].start()]
lowerCAmelCase__ : int = 0
lowerCAmelCase__ : Tuple = [
pos for pos in [find_re(_lowercase , _lowercase , _lowercase ) for terminal in terminals] if pos != -1
]
if len(_lowercase ) > 0:
return completion[: min(_lowercase )]
else:
return completion
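# --- Illustrative usage note (hedged) --------------------------------------------
# The `truncate` helper above is what powers the `truncate_before_pattern` argument
# of `decode`: the decoded completion is cut at the earliest regex match, after first
# trimming anything past a second top-level `print`/`def`, which usually marks an
# unrelated continuation. A typical call might look like the sketch below; the
# checkpoint name and the `generated_ids` tensor are assumptions for illustration,
# not taken from this file.
#
#   tokenizer = CodeGenTokenizerFast.from_pretrained("Salesforce/codegen-350M-mono")
#   text = tokenizer.decode(
#       generated_ids[0],
#       skip_special_tokens=True,
#       truncate_before_pattern=[r"\n\n^#", "^'''", "\n\n\n"],
#   )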
| 308 |
"""simple docstring"""
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = generate_pascal_triangle(lowerCAmelCase__ )
for row_idx in range(lowerCAmelCase__ ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=''' ''' )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] ,end=''' ''' )
else:
print(triangle[row_idx][col_idx] ,end='''''' )
print()
def lowercase ( lowerCAmelCase__ ):
if not isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
raise TypeError('''The input value of \'num_rows\' should be \'int\'''' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'''The input value of \'num_rows\' should be greater than or equal to 0''' )
lowerCamelCase_ = []
for current_row_idx in range(lowerCAmelCase__ ):
lowerCamelCase_ = populate_current_row(lowerCAmelCase__ ,lowerCAmelCase__ )
triangle.append(lowerCAmelCase__ )
return triangle
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
lowerCamelCase_ , lowerCamelCase_ = 1, 1
for current_col_idx in range(1 ,lowerCAmelCase__ ):
calculate_current_element(
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
return current_row
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,):
lowerCamelCase_ = triangle[current_row_idx - 1][current_col_idx - 1]
lowerCamelCase_ = triangle[current_row_idx - 1][current_col_idx]
lowerCamelCase_ = above_to_left_elt + above_to_right_elt
def lowercase ( lowerCAmelCase__ ):
if not isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
raise TypeError('''The input value of \'num_rows\' should be \'int\'''' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'''The input value of \'num_rows\' should be greater than or equal to 0''' )
lowerCamelCase_ = [[1]]
for row_index in range(1 ,lowerCAmelCase__ ):
lowerCamelCase_ = [0] + result[-1] + [0]
lowerCamelCase_ = row_index + 1
# Calculate the number of distinct elements in a row
lowerCamelCase_ = sum(divmod(lowerCAmelCase__ ,2 ) )
lowerCamelCase_ = [
temp_row[i - 1] + temp_row[i] for i in range(1 ,distinct_elements + 1 )
]
lowerCamelCase_ = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
lowerCamelCase_ = row_first_half + row_second_half
result.append(lowerCAmelCase__ )
return result
def lowercase ( ):
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(lowerCAmelCase__ ,lowerCAmelCase__ ) -> None:
lowerCamelCase_ = f"{func.__name__}({value})"
lowerCamelCase_ = timeit(f"__main__.{call}" ,setup='''import __main__''' )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(f"{call:38} -- {timing:.4f} seconds" )
for value in range(15 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(lowerCAmelCase__ ,lowerCAmelCase__ )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
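# --- Worked example (illustrative) -----------------------------------------------
# The optimized generator above only computes the first half of each row and then
# mirrors it, since Pascal's triangle rows are symmetric. For example, going from
# the row [1, 3, 3, 1] to the next row:
#   padded previous row  : [0, 1, 3, 3, 1, 0]
#   pairwise sums (half) : [1, 4, 6]          # ceil(5 / 2) = 3 distinct values
#   mirrored full row    : [1, 4, 6, 4, 1]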
| 29 | 0 |
import os
from pathlib import Path
def write_model_card( model_card_dir , src_lang , tgt_lang , model_name ):
    texts = {
'en': 'Machine learning is great, isn\'t it?',
'ru': 'Машинное обучение - это здорово, не так ли?',
'de': 'Maschinelles Lernen ist großartig, nicht wahr?',
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
    scores = {
'wmt16-en-de-dist-12-1': [28.3, 27.52],
'wmt16-en-de-dist-6-1': [27.4, 27.11],
'wmt16-en-de-12-1': [26.9, 25.75],
}
    pair = f"{src_lang}-{tgt_lang}"
_lowercase : Any = F'''\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n'''
    readme = _lowercase  # the README body is the f-string assigned on the line above
    model_card_dir.mkdir(parents=True , exist_ok=True )
    path = os.path.join(model_card_dir , 'README.md' )
print(F'''Generating {path}''' )
    with open(path , 'w' , encoding='utf-8' ) as f:
        f.write(readme )
# make sure we are under the root of the project
SCREAMING_SNAKE_CASE : List[Any] = Path(__file__).resolve().parent.parent.parent
SCREAMING_SNAKE_CASE : Optional[Any] = repo_dir / "model_cards"
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
SCREAMING_SNAKE_CASE : List[Any] = model_cards_dir / "allenai" / model_name
write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
| 89 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class __lowerCamelCase ( unittest.TestCase ):
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
lowerCamelCase_ = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
            '''image_mean''': [0.48145466, 0.4578275, 0.40821073],
            '''image_std''': [0.26862954, 0.26130258, 0.27577711],
}
lowerCamelCase_ = os.path.join(self.tmpdirname , UpperCAmelCase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
return BertTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
return BertTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def UpperCAmelCase__ ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCamelCase_ = [Image.fromarray(np.moveaxis(UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = self.get_rust_tokenizer()
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
processor_slow.save_pretrained(self.tmpdirname )
lowerCamelCase_ = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCAmelCase )
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
processor_fast.save_pretrained(self.tmpdirname )
lowerCamelCase_ = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , UpperCAmelCase )
self.assertIsInstance(processor_fast.tokenizer , UpperCAmelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , UpperCAmelCase )
self.assertIsInstance(processor_fast.image_processor , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase_ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
lowerCamelCase_ = self.get_image_processor(do_normalize=UpperCAmelCase , padding_value=1.0 )
lowerCamelCase_ = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = image_processor(UpperCAmelCase , return_tensors='''np''' )
lowerCamelCase_ = processor(images=UpperCAmelCase , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = '''lower newer'''
lowerCamelCase_ = processor(text=UpperCAmelCase )
lowerCamelCase_ = tokenizer(UpperCAmelCase , padding='''max_length''' , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = '''lower newer'''
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = processor(text=UpperCAmelCase , images=UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase ):
processor()
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase_ = processor.batch_decode(UpperCAmelCase )
lowerCamelCase_ = tokenizer.batch_decode(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = '''lower newer'''
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = processor(text=UpperCAmelCase , images=UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 29 | 0 |
"""simple docstring"""
def hamming_distance( string_a , string_b ):
    """simple docstring"""
    if len(string_a ) != len(string_b ):
        raise ValueError("""String lengths must match!""" )
    count = 0
    for char_a, char_b in zip(string_a , string_b ):
        if char_a != char_b:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
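# Example (illustrative): the distance counts the positions where the two strings differ.
assert hamming_distance("karolin", "kathrin") == 3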
| 644 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2]
lowerCamelCase_ = True if '''large''' in model_name or '''huge''' in model_name else False
lowerCamelCase_ = True if '''large''' in model_name or '''huge''' in model_name else False
lowerCamelCase_ = True if '''large''' in model_name or '''huge''' in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
lowerCamelCase_ = [3, 3, 3, 3]
lowerCamelCase_ = [5, 5, 5, 5]
elif "fl4" in model_name:
lowerCamelCase_ = [4, 4, 4, 4]
lowerCamelCase_ = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
lowerCamelCase_ = [3, 3, 3, 3]
if "lrf" in model_name:
lowerCamelCase_ = [3, 3, 3, 3]
else:
lowerCamelCase_ = [2, 2, 2, 2]
if "tiny" in model_name:
lowerCamelCase_ = 96
elif "small" in model_name:
lowerCamelCase_ = 96
elif "base" in model_name:
lowerCamelCase_ = 128
elif "large" in model_name:
lowerCamelCase_ = 192
elif "xlarge" in model_name:
lowerCamelCase_ = 256
elif "huge" in model_name:
lowerCamelCase_ = 352
# set label information
lowerCamelCase_ = '''huggingface/label-files'''
if "large" in model_name or "huge" in model_name:
lowerCamelCase_ = '''imagenet-22k-id2label.json'''
else:
lowerCamelCase_ = '''imagenet-1k-id2label.json'''
lowerCamelCase_ = json.load(open(hf_hub_download(lowerCAmelCase__ ,lowerCAmelCase__ ,repo_type='''dataset''' ) ,'''r''' ) )
lowerCamelCase_ = {int(lowerCAmelCase__ ): v for k, v in idalabel.items()}
lowerCamelCase_ = {v: k for k, v in idalabel.items()}
lowerCamelCase_ = FocalNetConfig(
embed_dim=lowerCAmelCase__ ,depths=lowerCAmelCase__ ,focal_levels=lowerCAmelCase__ ,focal_windows=lowerCAmelCase__ ,use_conv_embed=lowerCAmelCase__ ,idalabel=lowerCAmelCase__ ,labelaid=lowerCAmelCase__ ,use_post_layernorm=lowerCAmelCase__ ,use_layerscale=lowerCAmelCase__ ,)
return config
def lowercase ( lowerCAmelCase__ ):
if "patch_embed.proj" in name:
lowerCamelCase_ = name.replace('''patch_embed.proj''' ,'''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
lowerCamelCase_ = name.replace('''patch_embed.norm''' ,'''embeddings.norm''' )
if "layers" in name:
lowerCamelCase_ = '''encoder.''' + name
if "encoder.layers" in name:
lowerCamelCase_ = name.replace('''encoder.layers''' ,'''encoder.stages''' )
if "downsample.proj" in name:
lowerCamelCase_ = name.replace('''downsample.proj''' ,'''downsample.projection''' )
if "blocks" in name:
lowerCamelCase_ = name.replace('''blocks''' ,'''layers''' )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
lowerCamelCase_ = name.replace('''modulation.f''' ,'''modulation.projection_in''' )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
lowerCamelCase_ = name.replace('''modulation.h''' ,'''modulation.projection_context''' )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
lowerCamelCase_ = name.replace('''modulation.proj''' ,'''modulation.projection_out''' )
if name == "norm.weight":
lowerCamelCase_ = '''layernorm.weight'''
if name == "norm.bias":
lowerCamelCase_ = '''layernorm.bias'''
if "head" in name:
lowerCamelCase_ = name.replace('''head''' ,'''classifier''' )
else:
lowerCamelCase_ = '''focalnet.''' + name
return name
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=False ):
# fmt: off
lowerCamelCase_ = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
lowerCamelCase_ = model_name_to_url[model_name]
print('''Checkpoint URL: ''' ,lowerCAmelCase__ )
lowerCamelCase_ = torch.hub.load_state_dict_from_url(lowerCAmelCase__ ,map_location='''cpu''' )['''model''']
# rename keys
for key in state_dict.copy().keys():
lowerCamelCase_ = state_dict.pop(lowerCAmelCase__ )
lowerCamelCase_ = val
lowerCamelCase_ = get_focalnet_config(lowerCAmelCase__ )
lowerCamelCase_ = FocalNetForImageClassification(lowerCAmelCase__ )
model.eval()
# load state dict
model.load_state_dict(lowerCAmelCase__ )
# verify conversion
lowerCamelCase_ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCamelCase_ = BitImageProcessor(
do_resize=lowerCAmelCase__ ,size={'''shortest_edge''': 256} ,resample=PILImageResampling.BILINEAR ,do_center_crop=lowerCAmelCase__ ,crop_size=224 ,do_normalize=lowerCAmelCase__ ,image_mean=lowerCAmelCase__ ,image_std=lowerCAmelCase__ ,)
lowerCamelCase_ = Image.open(requests.get(lowerCAmelCase__ ,stream=lowerCAmelCase__ ).raw )
lowerCamelCase_ = processor(images=lowerCAmelCase__ ,return_tensors='''pt''' )
lowerCamelCase_ = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406] ,std=[0.229, 0.224, 0.225] ),
] )
lowerCamelCase_ = image_transforms(lowerCAmelCase__ ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values ,lowerCAmelCase__ ,atol=1E-4 )
lowerCamelCase_ = model(**lowerCAmelCase__ )
lowerCamelCase_ = outputs.logits.argmax(-1 ).item()
print('''Predicted class:''' ,model.config.idalabel[predicted_class_idx] )
print('''First values of logits:''' ,outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
lowerCamelCase_ = torch.tensor([0.2_166, -0.4_368, 0.2_191] )
elif model_name == "focalnet-tiny-lrf":
lowerCamelCase_ = torch.tensor([1.1_669, 0.0_125, -0.1_695] )
elif model_name == "focalnet-small":
lowerCamelCase_ = torch.tensor([0.4_917, -0.0_430, 0.1_341] )
elif model_name == "focalnet-small-lrf":
lowerCamelCase_ = torch.tensor([-0.2_588, -0.5_342, -0.2_331] )
elif model_name == "focalnet-base":
lowerCamelCase_ = torch.tensor([-0.1_655, -0.4_090, -0.1_730] )
elif model_name == "focalnet-base-lrf":
lowerCamelCase_ = torch.tensor([0.5_306, -0.0_483, -0.3_928] )
assert torch.allclose(outputs.logits[0, :3] ,lowerCAmelCase__ ,atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(lowerCAmelCase__ )
processor.save_pretrained(lowerCAmelCase__ )
if push_to_hub:
print(f"Pushing model and processor of {model_name} to the hub..." )
model.push_to_hub(f"{model_name}" )
processor.push_to_hub(f"{model_name}" )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""focalnet-tiny""",
type=str,
help="""Name of the FocalNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub.""",
)
A_ = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
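# --- Illustrative CLI usage (hedged) ----------------------------------------------
# Based on the argparse flags defined above; the script filename is an assumption.
#   python convert_focalnet_to_hf_format.py \
#       --model_name focalnet-tiny \
#       --pytorch_dump_folder_path ./focalnet-tiny \
#       --push_to_hub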
| 29 | 0 |
"""simple docstring"""
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class __a ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ : str = inspect.getfile(accelerate.test_utils )
lowerCAmelCase__ : Any = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] )
lowerCAmelCase__ : Optional[int] = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ : List[Any] = F"""\n {self.test_dir}/xla_spawn.py\n --num_cores 8\n {self.test_file_path}\n """.split()
lowerCAmelCase__ : Tuple = [sys.executable] + distributed_args
execute_subprocess_async(snake_case , env=os.environ.copy() )
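# --- Illustrative note (hedged) -----------------------------------------------------
# The command assembled in the test above is roughly equivalent to running, from the
# accelerate test-utils directory (paths illustrative):
#   python xla_spawn.py --num_cores 8 test_script.py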
| 453 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __lowerCamelCase ( lowerCAmelCase , unittest.TestCase ):
a__: Tuple = RoCBertTokenizer
a__: int = None
a__: Optional[Any] = False
a__: Optional[int] = True
a__: Tuple = filter_non_english
def UpperCAmelCase__ ( self ):
super().setUp()
lowerCamelCase_ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''你''', '''好''', '''是''', '''谁''', '''a''', '''b''', '''c''', '''d''']
lowerCamelCase_ = {}
lowerCamelCase_ = {}
for i, value in enumerate(UpperCAmelCase ):
lowerCamelCase_ = i
lowerCamelCase_ = i
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_shape_file'''] )
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_pronunciation_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
with open(self.word_shape_file , '''w''' , encoding='''utf-8''' ) as word_shape_writer:
json.dump(UpperCAmelCase , UpperCAmelCase , ensure_ascii=UpperCAmelCase )
with open(self.word_pronunciation_file , '''w''' , encoding='''utf-8''' ) as word_pronunciation_writer:
json.dump(UpperCAmelCase , UpperCAmelCase , ensure_ascii=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
lowerCamelCase_ = tokenizer.tokenize('''你好[SEP]你是谁''' )
self.assertListEqual(UpperCAmelCase , ['''你''', '''好''', '''[SEP]''', '''你''', '''是''', '''谁'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(UpperCAmelCase ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(UpperCAmelCase ) , [5, 6, 2, 5, 7, 8] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
lowerCamelCase_ = {}
for i, token in enumerate(UpperCAmelCase ):
lowerCamelCase_ = i
lowerCamelCase_ = RoCBertWordpieceTokenizer(vocab=UpperCAmelCase , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def UpperCAmelCase__ ( self ):
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def UpperCAmelCase__ ( self ):
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def UpperCAmelCase__ ( self ):
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(UpperCAmelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
if self.test_rust_tokenizer:
lowerCamelCase_ = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(UpperCAmelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
def UpperCAmelCase__ ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
lowerCamelCase_ = tokenizer_r.encode_plus(
UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , add_special_tokens=UpperCAmelCase , )
lowerCamelCase_ = tokenizer_r.do_lower_case if hasattr(UpperCAmelCase , '''do_lower_case''' ) else False
lowerCamelCase_ = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = ['''的''', '''人''', '''有''']
lowerCamelCase_ = ''''''.join(UpperCAmelCase )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCamelCase_ = True
lowerCamelCase_ = self.tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = tokenizer_p.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer_r.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer_r.convert_ids_to_tokens(UpperCAmelCase )
lowerCamelCase_ = tokenizer_p.convert_ids_to_tokens(UpperCAmelCase )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = False
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = self.tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = tokenizer_r.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer_p.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer_r.convert_ids_to_tokens(UpperCAmelCase )
lowerCamelCase_ = tokenizer_p.convert_ids_to_tokens(UpperCAmelCase )
# it is expected that only the first Chinese character is not preceded by "##".
lowerCamelCase_ = [
f"##{token}" if idx != 0 else token for idx, token in enumerate(UpperCAmelCase )
]
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
@slow
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
lowerCamelCase_ = tokenizer.encode('''你好''' , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer.encode('''你是谁''' , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase )
lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase , UpperCAmelCase )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_tokenizers(do_lower_case=UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
lowerCamelCase_ = '''你好,你是谁'''
lowerCamelCase_ = tokenizer.tokenize(UpperCAmelCase )
lowerCamelCase_ = tokenizer.convert_tokens_to_ids(UpperCAmelCase )
lowerCamelCase_ = tokenizer.convert_tokens_to_shape_ids(UpperCAmelCase )
lowerCamelCase_ = tokenizer.convert_tokens_to_pronunciation_ids(UpperCAmelCase )
lowerCamelCase_ = tokenizer.prepare_for_model(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer.encode_plus(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
| 29 | 0 |
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def _SCREAMING_SNAKE_CASE ( snake_case ) -> int:
_UpperCAmelCase = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""_float_tensor""",
"""decoder.output_projection.weight""",
]
for k in ignore_keys:
state_dict.pop(lowerCAmelCase__ , lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( snake_case ) -> Union[str, Any]:
_UpperCAmelCase , _UpperCAmelCase = emb.weight.shape
_UpperCAmelCase = nn.Linear(lowerCAmelCase__ , lowerCAmelCase__ , bias=lowerCAmelCase__ )
_UpperCAmelCase = emb.weight.data
return lin_layer
def _SCREAMING_SNAKE_CASE ( snake_case , snake_case="facebook/mbart-large-en-ro" , snake_case=False , snake_case=False ) -> Tuple:
_UpperCAmelCase = torch.load(lowerCAmelCase__ , map_location="""cpu""" )["""model"""]
remove_ignore_keys_(lowerCAmelCase__ )
_UpperCAmelCase = state_dict["""encoder.embed_tokens.weight"""].shape[0]
_UpperCAmelCase = MBartConfig.from_pretrained(lowerCAmelCase__ , vocab_size=lowerCAmelCase__ )
if mbart_aa and finetuned:
_UpperCAmelCase = """relu"""
_UpperCAmelCase = state_dict["""decoder.embed_tokens.weight"""]
_UpperCAmelCase = MBartForConditionalGeneration(lowerCAmelCase__ )
model.model.load_state_dict(lowerCAmelCase__ )
if finetuned:
_UpperCAmelCase = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
)
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--hf_config",
default="facebook/mbart-large-cc25",
type=str,
help="Which huggingface architecture to use: mbart-large",
)
parser.add_argument("--mbart_50", action="store_true", help="whether the model is mMART-50 checkpoint")
parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
a = parser.parse_args()
a = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_50
)
model.save_pretrained(args.pytorch_dump_folder_path) | 518 |
"""simple docstring"""
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
A_ = datasets.logging.get_logger(__name__)
A_ = """\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",
author = \"Moosavi, Nafise Sadat and
Strube, Michael\",
booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",
month = aug,
year = \"2016\",
address = \"Berlin, Germany\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/P16-1060\",
doi = \"10.18653/v1/P16-1060\",
pages = \"632--642\",
}
"""
A_ = """\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only work with CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identifies the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
"""
A_ = """
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting 'keep_singletons=False', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
'mentions': mentions
'muc': MUC metric [Vilain et al, 1995]
'bcub': B-cubed [Bagga and Baldwin, 1998]
'ceafe': CEAFe [Luo et al., 2005]
'lea': LEA [Moosavi and Strube, 2016]
'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric('coval')
>>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',
... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',
... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',
... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',
... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',
... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{'mentions/recall': 1.0,[...] 'conll_score': 100.0}
"""
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=False ,lowerCAmelCase__=False ,lowerCAmelCase__=True ,lowerCAmelCase__=False ,lowerCAmelCase__="dummy_doc" ):
lowerCamelCase_ = {doc: key_lines}
lowerCamelCase_ = {doc: sys_lines}
lowerCamelCase_ = {}
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ , lowerCamelCase_ = reader.get_doc_mentions(lowerCAmelCase__ ,key_doc_lines[doc] ,lowerCAmelCase__ )
key_singletons_num += singletons_num
if NP_only or min_span:
lowerCamelCase_ = reader.set_annotated_parse_trees(lowerCAmelCase__ ,key_doc_lines[doc] ,lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCamelCase_ , lowerCamelCase_ = reader.get_doc_mentions(lowerCAmelCase__ ,sys_doc_lines[doc] ,lowerCAmelCase__ )
sys_singletons_num += singletons_num
if NP_only or min_span:
lowerCamelCase_ = reader.set_annotated_parse_trees(lowerCAmelCase__ ,key_doc_lines[doc] ,lowerCAmelCase__ ,lowerCAmelCase__ )
if remove_nested:
lowerCamelCase_ , lowerCamelCase_ = reader.remove_nested_coref_mentions(lowerCAmelCase__ ,lowerCAmelCase__ )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
lowerCamelCase_ , lowerCamelCase_ = reader.remove_nested_coref_mentions(lowerCAmelCase__ ,lowerCAmelCase__ )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
lowerCamelCase_ = reader.get_mention_assignments(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCamelCase_ = reader.get_mention_assignments(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCamelCase_ = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
'''Number of removed nested coreferring mentions in the key '''
f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}" )
logger.info(
'''Number of resulting singleton clusters in the key '''
f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}" )
if not keep_singletons:
logger.info(
f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
'''files, respectively''' )
return doc_coref_infos
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = get_coref_infos(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCamelCase_ = {}
lowerCamelCase_ = 0
lowerCamelCase_ = 0
for name, metric in metrics:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = evaluator.evaluate_documents(lowerCAmelCase__ ,lowerCAmelCase__ ,beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": fa} )
logger.info(
name.ljust(10 ) ,f"Recall: {recall * 100:.2f}" ,f" Precision: {precision * 100:.2f}" ,f" F1: {fa * 100:.2f}" ,)
if conll_subparts_num == 3:
lowerCamelCase_ = (conll / 3) * 100
logger.info(f"CoNLL score: {conll:.2f}" )
output_scores.update({'''conll_score''': conll} )
return output_scores
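# Illustrative arithmetic for the averaged CoNLL score computed above: if MUC,
# B-cubed and CEAFe reach F1 = 0.70, 0.65 and 0.60, then
# conll_score = (0.70 + 0.65 + 0.60) / 3 * 100 = 65.0.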
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = False
for line in key_lines:
if not line.startswith('''#''' ):
if len(line.split() ) > 6:
lowerCamelCase_ = line.split()[5]
if not parse_col == "-":
lowerCamelCase_ = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase ( datasets.Metric ):
def UpperCAmelCase__ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' ) ),
'''references''': datasets.Sequence(datasets.Value('''string''' ) ),
} ) , codebase_urls=['''https://github.com/ns-moosavi/coval'''] , reference_urls=[
'''https://github.com/ns-moosavi/coval''',
'''https://www.aclweb.org/anthology/P16-1060''',
'''http://www.conll.cemantix.org/2012/data.html''',
] , )
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=False ):
lowerCamelCase_ = [
('''mentions''', evaluator.mentions),
('''muc''', evaluator.muc),
('''bcub''', evaluator.b_cubed),
('''ceafe''', evaluator.ceafe),
('''lea''', evaluator.lea),
]
if min_span:
            has_gold_parse = util.check_gold_parse_annotation(UpperCAmelCase )
if not has_gold_parse:
raise NotImplementedError('''References should have gold parse annotation to use \'min_span\'.''' )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
lowerCamelCase_ = evaluate(
key_lines=UpperCAmelCase , sys_lines=UpperCAmelCase , metrics=UpperCAmelCase , NP_only=UpperCAmelCase , remove_nested=UpperCAmelCase , keep_singletons=UpperCAmelCase , min_span=UpperCAmelCase , )
return score
| 29 | 0 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
_SCREAMING_SNAKE_CASE : Dict = pytest.mark.integration
@require_faiss
class A ( lowerCamelCase_ ):
'''simple docstring'''
def UpperCAmelCase__ ( self : Tuple):
        _lowercase: str = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
return dset
def UpperCAmelCase__ ( self : Union[str, Any]):
import faiss
_lowercase: Any = self._create_dummy_dataset()
_lowercase: Dict = dset.map(
lambda _UpperCamelCase , _UpperCamelCase: {"vecs": i * np.ones(5 , dtype=np.floataa)} , with_indices=_UpperCamelCase , keep_in_memory=_UpperCamelCase)
_lowercase: Dict = dset.add_faiss_index("vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT)
_lowercase , _lowercase: Union[str, Any] = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa))
self.assertEqual(examples["filename"][0] , "my_name-train_29")
dset.drop_index("vecs")
def UpperCAmelCase__ ( self : List[Any]):
import faiss
_lowercase: Optional[Any] = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1 , 1) , index_name="vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
_lowercase , _lowercase: List[Any] = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa))
self.assertEqual(examples["filename"][0] , "my_name-train_29")
def UpperCAmelCase__ ( self : List[str]):
import faiss
_lowercase: Tuple = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1 , 1) , index_name="vecs" , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=_UpperCamelCase) as tmp_file:
dset.save_faiss_index("vecs" , tmp_file.name)
dset.load_faiss_index("vecs2" , tmp_file.name)
os.unlink(tmp_file.name)
_lowercase , _lowercase: List[str] = dset.get_nearest_examples("vecs2" , np.ones(5 , dtype=np.floataa))
self.assertEqual(examples["filename"][0] , "my_name-train_29")
def UpperCAmelCase__ ( self : Any):
_lowercase: str = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1 , 1) , index_name="vecs")
dset.drop_index("vecs")
self.assertRaises(_UpperCamelCase , partial(dset.get_nearest_examples , "vecs2" , np.ones(5 , dtype=np.floataa)))
def UpperCAmelCase__ ( self : str):
from elasticsearch import Elasticsearch
_lowercase: Union[str, Any] = self._create_dummy_dataset()
with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
"elasticsearch.client.IndicesClient.create") as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
_lowercase: Any = {"acknowledged": True}
mocked_bulk.return_value([(True, None)] * 30)
_lowercase: int = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
_lowercase: List[str] = Elasticsearch()
dset.add_elasticsearch_index("filename" , es_client=_UpperCamelCase)
_lowercase , _lowercase: Dict = dset.get_nearest_examples("filename" , "my_name-train_29")
self.assertEqual(examples["filename"][0] , "my_name-train_29")
@require_faiss
class A ( lowerCamelCase_ ):
'''simple docstring'''
def UpperCAmelCase__ ( self : List[Any]):
import faiss
_lowercase: Optional[int] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa))
self.assertIsNotNone(index.faiss_index)
self.assertEqual(index.faiss_index.ntotal , 5)
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa))
self.assertEqual(index.faiss_index.ntotal , 10)
# single query
_lowercase: Optional[int] = np.zeros(5 , dtype=np.floataa)
_lowercase: Optional[Any] = 1
_lowercase , _lowercase: Union[str, Any] = index.search(_UpperCamelCase)
self.assertRaises(_UpperCamelCase , index.search , query.reshape(-1 , 1))
self.assertGreater(scores[0] , 0)
self.assertEqual(indices[0] , 1)
# batched queries
_lowercase: Tuple = np.eye(5 , dtype=np.floataa)[::-1]
_lowercase , _lowercase: Union[str, Any] = index.search_batch(_UpperCamelCase)
self.assertRaises(_UpperCamelCase , index.search_batch , queries[0])
_lowercase: Optional[int] = [scores[0] for scores in total_scores]
_lowercase: Optional[Any] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(_UpperCamelCase) , 0)
self.assertListEqual([4, 3, 2, 1, 0] , _UpperCamelCase)
def UpperCAmelCase__ ( self : Any):
import faiss
_lowercase: Optional[Any] = FaissIndex(string_factory="Flat")
index.add_vectors(np.eye(5 , dtype=np.floataa))
self.assertIsInstance(index.faiss_index , faiss.IndexFlat)
_lowercase: Tuple = FaissIndex(string_factory="LSH")
index.add_vectors(np.eye(5 , dtype=np.floataa))
self.assertIsInstance(index.faiss_index , faiss.IndexLSH)
with self.assertRaises(_UpperCamelCase):
_lowercase: Optional[int] = FaissIndex(string_factory="Flat" , custom_index=faiss.IndexFlat(5))
def UpperCAmelCase__ ( self : Tuple):
import faiss
_lowercase: Dict = faiss.IndexFlat(5)
_lowercase: str = FaissIndex(custom_index=_UpperCamelCase)
index.add_vectors(np.eye(5 , dtype=np.floataa))
self.assertIsInstance(index.faiss_index , faiss.IndexFlat)
def UpperCAmelCase__ ( self : Optional[int]):
import faiss
_lowercase: Tuple = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
index.add_vectors(np.eye(5 , dtype=np.floataa))
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=_UpperCamelCase) as tmp_file:
index.save(tmp_file.name)
_lowercase: Tuple = FaissIndex.load(tmp_file.name)
os.unlink(tmp_file.name)
_lowercase: Union[str, Any] = np.zeros(5 , dtype=np.floataa)
_lowercase: Union[str, Any] = 1
_lowercase , _lowercase: Optional[Any] = index.search(_UpperCamelCase)
self.assertGreater(scores[0] , 0)
self.assertEqual(indices[0] , 1)
@require_faiss
def __lowerCAmelCase ( __magic_name__ ):
import faiss
_lowercase: List[Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
_lowercase: Any = "index.faiss"
_lowercase: Dict = f"mock://{index_name}"
index.save(lowerCAmelCase__ , storage_options=mockfs.storage_options )
_lowercase: Optional[int] = FaissIndex.load(lowerCAmelCase__ , storage_options=mockfs.storage_options )
_lowercase: List[Any] = np.zeros(5 , dtype=np.floataa )
_lowercase: List[Any] = 1
_lowercase , _lowercase: Any = index.search(lowerCAmelCase__ )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class A ( lowerCamelCase_ ):
'''simple docstring'''
def UpperCAmelCase__ ( self : List[Any]):
from elasticsearch import Elasticsearch
with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
"elasticsearch.client.IndicesClient.create") as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
_lowercase: Any = Elasticsearch()
_lowercase: Union[str, Any] = {"acknowledged": True}
_lowercase: Tuple = ElasticSearchIndex(es_client=_UpperCamelCase)
mocked_bulk.return_value([(True, None)] * 3)
index.add_documents(["foo", "bar", "foobar"])
# single query
_lowercase: Tuple = "foo"
_lowercase: Union[str, Any] = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
_lowercase , _lowercase: Dict = index.search(_UpperCamelCase)
self.assertEqual(scores[0] , 1)
self.assertEqual(indices[0] , 0)
# single query with timeout
_lowercase: Tuple = "foo"
_lowercase: Union[str, Any] = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
_lowercase , _lowercase: Dict = index.search(_UpperCamelCase , request_timeout=30)
self.assertEqual(scores[0] , 1)
self.assertEqual(indices[0] , 0)
# batched queries
_lowercase: int = ["foo", "bar", "foobar"]
_lowercase: List[str] = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
_lowercase , _lowercase: Dict = index.search_batch(_UpperCamelCase)
_lowercase: Optional[int] = [scores[0] for scores in total_scores]
_lowercase: Optional[int] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(_UpperCamelCase) , 0)
self.assertListEqual([1, 1, 1] , _UpperCamelCase)
# batched queries with timeout
_lowercase: Dict = ["foo", "bar", "foobar"]
_lowercase: Tuple = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
_lowercase , _lowercase: str = index.search_batch(_UpperCamelCase , request_timeout=30)
_lowercase: Any = [scores[0] for scores in total_scores]
_lowercase: Tuple = [indices[0] for indices in total_indices]
self.assertGreater(np.min(_UpperCamelCase) , 0)
self.assertListEqual([1, 1, 1] , _UpperCamelCase)
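
# A minimal, standalone sketch (not part of the test classes above) of the FaissIndex flow
# they exercise: add vectors, run a single query, then a batched query. It assumes `faiss`
# is installed; the 5-dimensional toy vectors are illustrative only.
def _faiss_index_usage_sketch():
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))  # five orthogonal unit vectors
    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)  # the best match should be row 1
    total_scores, total_indices = index.search_batch(np.eye(5, dtype=np.float32))
    return scores, indices, total_scores, total_indices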
| 226 |
"""simple docstring"""
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class __lowerCamelCase ( unittest.TestCase ):
@parameterized.expand([(None,), ('''foo.json''',)] )
def UpperCAmelCase__ ( self , UpperCAmelCase ):
lowerCamelCase_ = GenerationConfig(
do_sample=UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCAmelCase , config_name=UpperCAmelCase )
lowerCamelCase_ = GenerationConfig.from_pretrained(UpperCAmelCase , config_name=UpperCAmelCase )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , UpperCAmelCase )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = AutoConfig.from_pretrained('''gpt2''' )
lowerCamelCase_ = GenerationConfig.from_model_config(UpperCAmelCase )
lowerCamelCase_ = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(UpperCAmelCase , UpperCAmelCase )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig()
lowerCamelCase_ = {
'''max_new_tokens''': 1024,
'''foo''': '''bar''',
}
lowerCamelCase_ = copy.deepcopy(UpperCAmelCase )
lowerCamelCase_ = generation_config.update(**UpperCAmelCase )
# update_kwargs was not modified (no side effects)
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(UpperCAmelCase , {'''foo''': '''bar'''} )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig()
lowerCamelCase_ = '''bar'''
with tempfile.TemporaryDirectory('''test-generation-config''' ) as tmp_dir:
generation_config.save_pretrained(UpperCAmelCase )
lowerCamelCase_ = GenerationConfig.from_pretrained(UpperCAmelCase )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , '''bar''' )
lowerCamelCase_ = GenerationConfig.from_model_config(UpperCAmelCase )
assert not hasattr(UpperCAmelCase , '''foo''' ) # no new kwargs should be initialized if from config
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , UpperCAmelCase )
self.assertEqual(default_config.num_beams , 1 )
lowerCamelCase_ = GenerationConfig(
do_sample=UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , UpperCAmelCase )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCAmelCase )
lowerCamelCase_ = GenerationConfig.from_pretrained(UpperCAmelCase , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , UpperCAmelCase )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class __lowerCamelCase ( unittest.TestCase ):
@classmethod
def UpperCAmelCase__ ( cls ):
lowerCamelCase_ = TOKEN
HfFolder.save_token(UpperCAmelCase )
@classmethod
def UpperCAmelCase__ ( cls ):
try:
delete_repo(token=cls._token , repo_id='''test-generation-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-generation-config-org''' )
except HTTPError:
pass
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig(
do_sample=UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''test-generation-config''' , use_auth_token=self._token )
lowerCamelCase_ = GenerationConfig.from_pretrained(f"{USER}/test-generation-config" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-generation-config''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
UpperCAmelCase , repo_id='''test-generation-config''' , push_to_hub=UpperCAmelCase , use_auth_token=self._token )
lowerCamelCase_ = GenerationConfig.from_pretrained(f"{USER}/test-generation-config" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig(
do_sample=UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''valid_org/test-generation-config-org''' , use_auth_token=self._token )
lowerCamelCase_ = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-generation-config-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
UpperCAmelCase , repo_id='''valid_org/test-generation-config-org''' , push_to_hub=UpperCAmelCase , use_auth_token=self._token )
lowerCamelCase_ = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
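
# A small local sketch of the save/load round trip the tests above depend on. It only uses
# documented GenerationConfig behaviour; the temperature/top_k/max_new_tokens values are
# arbitrary examples.
def _generation_config_roundtrip_sketch():
    config = GenerationConfig(do_sample=True, temperature=0.7, top_k=10)
    with tempfile.TemporaryDirectory() as tmp_dir:
        config.save_pretrained(tmp_dir)
        reloaded = GenerationConfig.from_pretrained(tmp_dir)
    # `.update()` applies known attributes and returns the kwargs it could not use.
    unused = reloaded.update(max_new_tokens=64, not_a_real_field="ignored")
    assert reloaded.max_new_tokens == 64 and unused == {"not_a_real_field": "ignored"}
    return reloaded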
| 29 | 0 |
from __future__ import annotations
def is_palindrome(n: int | str) -> bool:
    """Return True if ``n`` reads the same forwards and backwards."""
    n = str(n)
    return n == n[::-1]


def solution(limit: int = 1_00_00_00) -> int:
    """Return the sum of all numbers below ``limit`` that are palindromic in base 10 and base 2."""
    total = 0
    for i in range(1, limit):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total
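
# Quick illustrative check of the helpers above: 585 is palindromic in both bases
# (585 == 0b1001001001).
assert is_palindrome(585) and is_palindrome(bin(585).split("b")[1])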
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 668 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __lowerCamelCase :
a__: List[str]
a__: Optional[str] = None
# Automatically constructed
a__: ClassVar[str] = "dict"
a__: ClassVar[Any] = None
a__: str = field(default='Translation' , init=lowerCAmelCase , repr=lowerCAmelCase )
def __call__( self ):
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def UpperCAmelCase__ ( self ):
from .features import Value
return {k: Value('''string''' ) for k in sorted(self.languages )}
@dataclass
class __lowerCamelCase :
a__: Optional[List] = None
a__: Optional[int] = None
a__: Optional[str] = None
# Automatically constructed
a__: ClassVar[str] = "dict"
a__: ClassVar[Any] = None
a__: str = field(default='TranslationVariableLanguages' , init=lowerCAmelCase , repr=lowerCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = sorted(set(self.languages ) ) if self.languages else None
lowerCamelCase_ = len(self.languages ) if self.languages else None
def __call__( self ):
return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} )
def UpperCAmelCase__ ( self , UpperCAmelCase ):
lowerCamelCase_ = set(self.languages )
if self.languages and set(UpperCAmelCase ) - lang_set:
raise ValueError(
f"Some languages in example ({', '.join(sorted(set(UpperCAmelCase ) - lang_set ) )}) are not in valid set ({', '.join(UpperCAmelCase )})." )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
lowerCamelCase_ = []
for lang, text in translation_dict.items():
if isinstance(UpperCAmelCase , UpperCAmelCase ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
lowerCamelCase_ , lowerCamelCase_ = zip(*sorted(UpperCAmelCase ) )
return {"language": languages, "translation": translations}
def UpperCAmelCase__ ( self ):
from .features import Sequence, Value
return {
"language": Sequence(Value('''string''' ) ),
"translation": Sequence(Value('''string''' ) ),
}
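
# Usage sketch, via the public `datasets` API that the feature classes above implement:
# declaring a schema with a fixed-language translation column. The language codes are
# arbitrary examples.
def _translation_features_sketch():
    from datasets import Features, Translation, Value

    return Features(
        {
            "id": Value("string"),
            "translation": Translation(languages=["en", "fr", "de"]),
        }
    )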
| 29 | 0 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config, decoder_config, **kwargs):
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
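
# Usage sketch: combine two small sub-model configs into one composite config with the
# classmethod above. It assumes `transformers` (and its BertConfig) is importable; the
# hidden sizes are placeholders.
def _encoder_decoder_config_sketch():
    from transformers import BertConfig

    encoder_cfg = BertConfig(hidden_size=128, num_hidden_layers=2, num_attention_heads=2)
    decoder_cfg = BertConfig(hidden_size=128, num_hidden_layers=2, num_attention_heads=2)
    composite = EncoderDecoderConfig.from_encoder_decoder_configs(encoder_cfg, decoder_cfg)
    # The decoder copy is flipped to is_decoder=True with cross-attention enabled.
    return composite.decoder.is_decoder, composite.decoder.add_cross_attention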
| 640 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
A_ = {
"""configuration_gpt_neo""": ["""GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoConfig""", """GPTNeoOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"""GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoForCausalLM""",
"""GPTNeoForQuestionAnswering""",
"""GPTNeoForSequenceClassification""",
"""GPTNeoForTokenClassification""",
"""GPTNeoModel""",
"""GPTNeoPreTrainedModel""",
"""load_tf_weights_in_gpt_neo""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"""FlaxGPTNeoForCausalLM""",
"""FlaxGPTNeoModel""",
"""FlaxGPTNeoPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
A_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
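
# Minimal sketch of the lazy-import pattern used above, independent of transformers
# (illustrative only; the real `_LazyModule` lives in transformers.utils): attribute
# access on the placeholder module triggers the underlying import on demand.
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                real_module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(real_module, attr)
        raise AttributeError(attr)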
| 29 | 0 |
'''simple docstring'''
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def __lowercase ( __lowercase , __lowercase , __lowercase=[] ) -> Optional[int]:
'''simple docstring'''
_A = size[0] - overlap_pixels * 2
_A = size[1] - overlap_pixels * 2
for letter in ["l", "r"]:
if letter in remove_borders:
size_x += overlap_pixels
for letter in ["t", "b"]:
if letter in remove_borders:
size_y += overlap_pixels
_A = np.ones((size_y, size_x) , dtype=np.uinta ) * 255
_A = np.pad(lowerCAmelCase__ , mode="linear_ramp" , pad_width=lowerCAmelCase__ , end_values=0 )
if "l" in remove_borders:
_A = mask[:, overlap_pixels : mask.shape[1]]
if "r" in remove_borders:
_A = mask[:, 0 : mask.shape[1] - overlap_pixels]
if "t" in remove_borders:
_A = mask[overlap_pixels : mask.shape[0], :]
if "b" in remove_borders:
_A = mask[0 : mask.shape[0] - overlap_pixels, :]
return mask
def __lowercase ( __lowercase , __lowercase , __lowercase ) -> Optional[int]:
'''simple docstring'''
return max(lowerCAmelCase__ , min(lowerCAmelCase__ , lowerCAmelCase__ ) )
def __lowercase ( __lowercase , __lowercase , __lowercase ) -> Dict:
'''simple docstring'''
return (
clamp(rect[0] , min[0] , max[0] ),
clamp(rect[1] , min[1] , max[1] ),
clamp(rect[2] , min[0] , max[0] ),
clamp(rect[3] , min[1] , max[1] ),
)
def __lowercase ( __lowercase , __lowercase , __lowercase ) -> Optional[int]:
'''simple docstring'''
_A = list(lowerCAmelCase__ )
rect[0] -= overlap
rect[1] -= overlap
rect[2] += overlap
rect[3] += overlap
_A = clamp_rect(lowerCAmelCase__ , [0, 0] , [image_size[0], image_size[1]] )
return rect
def __lowercase ( __lowercase , __lowercase , __lowercase , __lowercase ) -> List[Any]:
'''simple docstring'''
_A = Image.new("RGB" , (tile.size[0] + original_slice, tile.size[1]) )
result.paste(
original_image.resize((tile.size[0], tile.size[1]) , Image.BICUBIC ).crop(
(slice_x, 0, slice_x + original_slice, tile.size[1]) ) , (0, 0) , )
result.paste(lowerCAmelCase__ , (original_slice, 0) )
return result
def __lowercase ( __lowercase , __lowercase ) -> Union[str, Any]:
'''simple docstring'''
_A = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
_A = tile.crop(lowerCAmelCase__ )
return tile
def __lowercase ( __lowercase , __lowercase ) -> int:
'''simple docstring'''
_A = n % d
return n - divisor
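
# Worked example of the tiling arithmetic the pipeline below relies on: for a 192x128 input,
# 64-pixel tiles and an 8-pixel border, count the tiles and expand/clamp one crop rectangle.
# All numbers are illustrative only.
def _tile_grid_sketch():
    image_size = (192, 128)  # (width, height)
    tile_size, border = 64, 8
    tiles_x = math.ceil(image_size[0] / tile_size)  # 3
    tiles_y = math.ceil(image_size[1] / tile_size)  # 2
    rect = (0, 0, tile_size, tile_size)  # top-left tile
    expanded = (
        max(rect[0] - border, 0),
        max(rect[1] - border, 0),
        min(rect[2] + border, image_size[0]),
        min(rect[3] + border, image_size[1]),
    )
    return tiles_x * tiles_y, expanded  # (6, (0, 0, 72, 72))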
class _UpperCAmelCase ( snake_case_ ):
"""simple docstring"""
def __init__( self : Tuple , __UpperCAmelCase : Tuple , __UpperCAmelCase : str , __UpperCAmelCase : Tuple , __UpperCAmelCase : Dict , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : int = 350 , ):
'''simple docstring'''
super().__init__(
vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , max_noise_level=__UpperCAmelCase , )
def lowerCAmelCase ( self : Any , __UpperCAmelCase : List[str] , __UpperCAmelCase : Tuple , __UpperCAmelCase : str , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Any , **__UpperCAmelCase : List[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
_A = (
min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ),
min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ),
min(image.size[0] , (x + 1) * tile_size ),
min(image.size[1] , (y + 1) * tile_size ),
)
_A = add_overlap_rect(__UpperCAmelCase , __UpperCAmelCase , image.size )
_A = image.crop(__UpperCAmelCase )
_A = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
_A = translated_slice_x - (original_image_slice / 2)
_A = max(0 , __UpperCAmelCase )
_A = squeeze_tile(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
_A = to_input.size
_A = to_input.resize((tile_size, tile_size) , Image.BICUBIC )
_A = super(__UpperCAmelCase , self ).__call__(image=__UpperCAmelCase , **__UpperCAmelCase ).images[0]
_A = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC )
_A = unsqueeze_tile(__UpperCAmelCase , __UpperCAmelCase )
_A = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC )
_A = []
if x == 0:
remove_borders.append("l" )
elif crop_rect[2] == image.size[0]:
remove_borders.append("r" )
if y == 0:
remove_borders.append("t" )
elif crop_rect[3] == image.size[1]:
remove_borders.append("b" )
_A = Image.fromarray(
make_transparency_mask(
(upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=__UpperCAmelCase ) , mode="L" , )
final_image.paste(
__UpperCAmelCase , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , __UpperCAmelCase )
@torch.no_grad()
def __call__( self : Optional[int] , __UpperCAmelCase : Dict , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Union[str, Any] = 75 , __UpperCAmelCase : Optional[int] = 9.0 , __UpperCAmelCase : List[Any] = 50 , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : List[str] = 1 , __UpperCAmelCase : List[str] = 0.0 , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : str = None , __UpperCAmelCase : Dict = None , __UpperCAmelCase : Optional[Any] = 1 , __UpperCAmelCase : List[Any] = 128 , __UpperCAmelCase : Optional[Any] = 32 , __UpperCAmelCase : Tuple = 32 , ):
'''simple docstring'''
_A = Image.new("RGB" , (image.size[0] * 4, image.size[1] * 4) )
_A = math.ceil(image.size[0] / tile_size )
_A = math.ceil(image.size[1] / tile_size )
_A = tcx * tcy
_A = 0
for y in range(__UpperCAmelCase ):
for x in range(__UpperCAmelCase ):
self._process_tile(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , prompt=__UpperCAmelCase , num_inference_steps=__UpperCAmelCase , guidance_scale=__UpperCAmelCase , noise_level=__UpperCAmelCase , negative_prompt=__UpperCAmelCase , num_images_per_prompt=__UpperCAmelCase , eta=__UpperCAmelCase , generator=__UpperCAmelCase , latents=__UpperCAmelCase , )
current_count += 1
if callback is not None:
callback({"progress": current_count / total_tile_count, "image": final_image} )
return final_image
def __lowercase ( ) -> Optional[int]:
'''simple docstring'''
_A = "stabilityai/stable-diffusion-x4-upscaler"
_A = StableDiffusionTiledUpscalePipeline.from_pretrained(lowerCAmelCase__ , revision="fp16" , torch_dtype=torch.floataa )
_A = pipe.to("cuda" )
_A = Image.open("../../docs/source/imgs/diffusers_library.jpg" )
def callback(__lowercase ):
print(F'''progress: {obj['progress']:.4f}''' )
obj["image"].save("diffusers_library_progress.jpg" )
_A = pipe(image=lowerCAmelCase__ , prompt="Black font, white background, vector" , noise_level=40 , callback=lowerCAmelCase__ )
final_image.save("diffusers_library.jpg" )
if __name__ == "__main__":
main()
| 330 |
"""simple docstring"""
import math
def prime_sieve(n):
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(limit=999_966_663_333):
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum
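
# Sanity-check sketch for the sieve above on a tiny bound (the real limit is far larger):
# prime_sieve(30) -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
def _prime_sieve_example():
    return prime_sieve(30)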
if __name__ == "__main__":
print(solution())
| 29 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _UpperCamelCase ( __snake_case , __snake_case , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase = StableDiffusionXLImgaImgPipeline
lowerCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
lowerCAmelCase = PipelineTesterMixin.required_optional_params - {'latents'}
lowerCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowerCAmelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS
lowerCAmelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS
def _UpperCAmelCase ( self ) -> Any:
torch.manual_seed(0 )
A = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , attention_head_dim=(2, 4) , use_linear_projection=a__ , addition_embed_type="""text_time""" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
A = EulerDiscreteScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , steps_offset=1 , beta_schedule="""scaled_linear""" , timestep_spacing="""leading""" , )
torch.manual_seed(0 )
A = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
A = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=32 , )
A = CLIPTextModel(a__ )
A = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=a__ )
A = CLIPTextModelWithProjection(a__ )
A = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=a__ )
A = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""text_encoder_2""": text_encoder_a,
"""tokenizer_2""": tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def _UpperCAmelCase ( self , a__ , a__=0 ) -> List[str]:
A = floats_tensor((1, 3, 32, 32) , rng=random.Random(a__ ) ).to(a__ )
A = image / 2 + 0.5
if str(a__ ).startswith("""mps""" ):
A = torch.manual_seed(a__ )
else:
A = torch.Generator(device=a__ ).manual_seed(a__ )
A = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 5.0,
"""output_type""": """numpy""",
"""strength""": 0.75,
}
return inputs
def _UpperCAmelCase ( self ) -> Optional[Any]:
A = """cpu""" # ensure determinism for the device-dependent torch.Generator
A = self.get_dummy_components()
A = StableDiffusionXLImgaImgPipeline(**a__ )
A = sd_pipe.to(a__ )
sd_pipe.set_progress_bar_config(disable=a__ )
A = self.get_dummy_inputs(a__ )
A = sd_pipe(**a__ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
A = np.array([0.46_56, 0.48_40, 0.44_39, 0.66_98, 0.55_74, 0.45_24, 0.57_99, 0.59_43, 0.51_65] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _UpperCAmelCase ( self ) -> Tuple:
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def _UpperCAmelCase ( self ) -> int:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
def _UpperCAmelCase ( self ) -> str:
A = self.get_dummy_components()
A = StableDiffusionXLImgaImgPipeline(**a__ )
A = sd_pipe.to(a__ )
A = sd_pipe.to(a__ )
sd_pipe.set_progress_bar_config(disable=a__ )
# forward without prompt embeds
A = self.get_dummy_inputs(a__ )
A = 3 * ["""this is a negative prompt"""]
A = negative_prompt
A = 3 * [inputs["""prompt"""]]
A = sd_pipe(**a__ )
A = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
A = self.get_dummy_inputs(a__ )
A = 3 * ["""this is a negative prompt"""]
A = 3 * [inputs.pop("""prompt""" )]
        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(a__ , negative_prompt=a__ )
        A = sd_pipe(
            **a__ , prompt_embeds=prompt_embeds , negative_prompt_embeds=negative_prompt_embeds , pooled_prompt_embeds=pooled_prompt_embeds , negative_pooled_prompt_embeds=negative_pooled_prompt_embeds , )
A = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@slow
@require_torch_gpu
class _UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def _UpperCAmelCase ( self ) -> Any:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCAmelCase ( self , a__ , a__="cpu" , a__=torch.floataa , a__=0 ) -> List[Any]:
A = torch.Generator(device=a__ ).manual_seed(a__ )
A = np.random.RandomState(a__ ).standard_normal((1, 4, 64, 64) )
A = torch.from_numpy(a__ ).to(device=a__ , dtype=a__ )
A = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def _UpperCAmelCase ( self ) -> Tuple:
A = DiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-base""" )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
A = self.get_inputs(a__ )
A = pipe(**a__ ).images
A = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
A = np.array([0.4_94_93, 0.4_78_96, 0.4_07_98, 0.5_42_14, 0.5_32_12, 0.4_82_02, 0.4_76_56, 0.4_63_29, 0.4_85_06] )
assert np.abs(image_slice - expected_slice ).max() < 7e-3
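
# Standalone sketch of the reproducible-seeding pattern used in `get_dummy_inputs` above:
# MPS needs the CPU-seeded global generator, other devices can seed a device-local Generator.
def _seeded_generator_sketch(device: str, seed: int = 0):
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)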
| 641 |
"""simple docstring"""
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
A_ = namedtuple(
"""_TestCommandArgs""",
[
"""dataset""",
"""name""",
"""cache_dir""",
"""data_dir""",
"""all_configs""",
"""save_infos""",
"""ignore_verifications""",
"""force_redownload""",
"""clear_cache""",
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_apercent_close(source, target):
    return (abs(source - target) / target) < 0.01
@pytest.mark.integration
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = _TestCommandArgs(dataset=lowerCAmelCase__ ,all_configs=lowerCAmelCase__ ,save_infos=lowerCAmelCase__ )
lowerCamelCase_ = TestCommand(*lowerCAmelCase__ )
test_command.run()
lowerCamelCase_ = os.path.join(lowerCAmelCase__ ,'''README.md''' )
assert os.path.exists(lowerCAmelCase__ )
lowerCamelCase_ = DatasetInfosDict.from_directory(lowerCAmelCase__ )
lowerCamelCase_ = DatasetInfosDict(
{
'''default''': DatasetInfo(
features=Features(
{
'''tokens''': Sequence(Value('''string''' ) ),
'''ner_tags''': Sequence(
ClassLabel(names=['''O''', '''B-PER''', '''I-PER''', '''B-ORG''', '''I-ORG''', '''B-LOC''', '''I-LOC'''] ) ),
'''langs''': Sequence(Value('''string''' ) ),
'''spans''': Sequence(Value('''string''' ) ),
} ) ,splits=[
{
'''name''': '''train''',
'''num_bytes''': 2_351_563,
'''num_examples''': 10_000,
},
{
'''name''': '''validation''',
'''num_bytes''': 238_418,
'''num_examples''': 1_000,
},
] ,download_size=3_940_680 ,dataset_size=2_589_981 ,)
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
lowerCamelCase_ , lowerCamelCase_ = getattr(dataset_infos['''default'''] ,lowerCAmelCase__ ), getattr(expected_dataset_infos['''default'''] ,lowerCAmelCase__ )
if key == "num_bytes":
assert is_apercent_close(lowerCAmelCase__ ,lowerCAmelCase__ )
elif key == "splits":
assert list(lowerCAmelCase__ ) == list(lowerCAmelCase__ )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes ,expected[split].num_bytes )
else:
result == expected
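
# Illustration of the tolerance helper used above: byte counts only need to match the
# recorded dataset_infos to within 1%.
def _tolerance_sketch():
    return is_apercent_close(1_005, 1_000), is_apercent_close(1_050, 1_000)  # (True, False)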
| 29 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : int , _A : List[Any] , _A : List[Any]=7 , _A : str=3 , _A : List[str]=1_8 , _A : Optional[Any]=3_0 , _A : Dict=4_0_0 , _A : Any=True , _A : List[str]=3_2 , _A : Optional[Any]=True , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = parent
_SCREAMING_SNAKE_CASE : Tuple = batch_size
_SCREAMING_SNAKE_CASE : List[Any] = num_channels
_SCREAMING_SNAKE_CASE : Union[str, Any] = image_size
_SCREAMING_SNAKE_CASE : Tuple = min_resolution
_SCREAMING_SNAKE_CASE : Union[str, Any] = max_resolution
_SCREAMING_SNAKE_CASE : Union[str, Any] = do_resize
_SCREAMING_SNAKE_CASE : Any = size_divisor
_SCREAMING_SNAKE_CASE : Tuple = do_rescale
def _lowerCAmelCase ( self : Optional[Any]):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class _snake_case ( __snake_case , unittest.TestCase ):
"""simple docstring"""
a = GLPNImageProcessor if is_vision_available() else None
def _lowerCAmelCase ( self : Optional[Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[Any] = GLPNImageProcessingTester(self)
@property
def _lowerCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCAmelCase ( self : Tuple):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(_A , """do_resize"""))
self.assertTrue(hasattr(_A , """size_divisor"""))
self.assertTrue(hasattr(_A , """resample"""))
self.assertTrue(hasattr(_A , """do_rescale"""))
def _lowerCAmelCase ( self : Dict):
"""simple docstring"""
pass
def _lowerCAmelCase ( self : List[Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : str = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
_SCREAMING_SNAKE_CASE : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A)
for image in image_inputs:
self.assertIsInstance(_A , Image.Image)
# Test not batched input (GLPNImageProcessor doesn't support batching)
_SCREAMING_SNAKE_CASE : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
def _lowerCAmelCase ( self : Tuple):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
_SCREAMING_SNAKE_CASE : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A)
for image in image_inputs:
self.assertIsInstance(_A , np.ndarray)
# Test not batched input (GLPNImageProcessor doesn't support batching)
_SCREAMING_SNAKE_CASE : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
def _lowerCAmelCase ( self : Any):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
_SCREAMING_SNAKE_CASE : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A)
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor)
# Test not batched input (GLPNImageProcessor doesn't support batching)
_SCREAMING_SNAKE_CASE : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
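
# The property the assertions above check, in isolation: GLPN resizes so that both spatial
# dimensions become multiples of `size_divisor`, rounding down. The input shape below is an
# arbitrary example.
def _size_divisor_sketch(height: int = 250, width: int = 333, size_divisor: int = 32):
    new_height = (height // size_divisor) * size_divisor  # 224
    new_width = (width // size_divisor) * size_divisor  # 320
    return new_height, new_width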
| 338 |
"""simple docstring"""
from jiwer import compute_measures
import datasets
A_ = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
A_ = """\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
"""
A_ = """
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> wer = datasets.load_metric(\"wer\")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase ( datasets.Metric ):
def UpperCAmelCase__ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
] , )
def UpperCAmelCase__ ( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=False ):
if concatenate_texts:
return compute_measures(UpperCAmelCase , UpperCAmelCase )["wer"]
else:
lowerCamelCase_ = 0
lowerCamelCase_ = 0
for prediction, reference in zip(UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = compute_measures(UpperCAmelCase , UpperCAmelCase )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
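
# Hand-worked example of the formula in the docstring: one substitution against a four-word
# reference gives WER = (S + D + I) / (S + D + C) = 1 / 4. `compute_measures` is the same
# jiwer call the metric uses above.
def _wer_by_hand_sketch():
    measures = compute_measures("this is the reference", "this is the prediction")
    s, d, i = measures["substitutions"], measures["deletions"], measures["insertions"]
    n = s + d + measures["hits"]
    return (s + d + i) / n  # 0.25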
| 29 | 0 |
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
__UpperCAmelCase = logging.getLogger(__name__)
@dataclass
class lowercase_ ( a_ ):
__magic_name__ : Optional[float] = field(
default=0.0 , metadata={"""help""": """The label smoothing epsilon to apply (if not zero)."""} )
__magic_name__ : bool = field(default=a_ , metadata={"""help""": """Whether to SortishSamler or not."""} )
__magic_name__ : bool = field(
default=a_ , metadata={"""help""": """Whether to use generate to calculate generative metrics (ROUGE, BLEU)."""} )
__magic_name__ : bool = field(default=a_ , metadata={"""help""": """whether to use adafactor"""} )
__magic_name__ : Optional[float] = field(
default=a_ , metadata={"""help""": """Encoder layer dropout probability. Goes into model.config."""} )
__magic_name__ : Optional[float] = field(
default=a_ , metadata={"""help""": """Decoder layer dropout probability. Goes into model.config."""} )
__magic_name__ : Optional[float] = field(default=a_ , metadata={"""help""": """Dropout probability. Goes into model.config."""} )
__magic_name__ : Optional[float] = field(
default=a_ , metadata={"""help""": """Attention dropout probability. Goes into model.config."""} )
__magic_name__ : Optional[str] = field(
default="""linear""" , metadata={"""help""": f'''Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}'''} , )
| 308 |
"""simple docstring"""
def reverse_words(input_str: str) -> str:
    """Reverse the order of the words in a sentence.

    >>> reverse_words("I love Python")
    'Python love I'
    """
    return " ".join(input_str.split()[::-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
| 29 | 0 |
import warnings
from ..trainer import Trainer
from ..utils import logging
SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
class _lowerCamelCase( _a ):
def __init__( self, lowerCamelCase=None, **lowerCamelCase) -> int:
"""simple docstring"""
warnings.warn(
'`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` '
'instead.', lowerCamelCase, )
super().__init__(args=lowerCamelCase, **lowerCamelCase)
| 89 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class __lowerCamelCase ( unittest.TestCase ):
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = jnp.ones((batch_size, length) ) / length
return scores
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = None
lowerCamelCase_ = 20
lowerCamelCase_ = self._get_uniform_logits(batch_size=2 , length=UpperCAmelCase )
# tweak scores to not be uniform anymore
lowerCamelCase_ = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
lowerCamelCase_ = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
lowerCamelCase_ = jax.nn.softmax(UpperCAmelCase , axis=-1 )
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=1.3 )
lowerCamelCase_ = jax.nn.softmax(temp_dist_warper_sharper(UpperCAmelCase , scores.copy() , cur_len=UpperCAmelCase ) , axis=-1 )
lowerCamelCase_ = jax.nn.softmax(temp_dist_warper_smoother(UpperCAmelCase , scores.copy() , cur_len=UpperCAmelCase ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = None
lowerCamelCase_ = 10
lowerCamelCase_ = 2
# create ramp distribution
lowerCamelCase_ = np.broadcast_to(np.arange(UpperCAmelCase )[None, :] , (batch_size, vocab_size) ).copy()
lowerCamelCase_ = ramp_logits[1:, : vocab_size // 2] + vocab_size
lowerCamelCase_ = FlaxTopKLogitsWarper(3 )
lowerCamelCase_ = top_k_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
lowerCamelCase_ = 5
lowerCamelCase_ = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
lowerCamelCase_ = np.broadcast_to(np.arange(UpperCAmelCase )[None, :] , (batch_size, length) ).copy()
lowerCamelCase_ = top_k_warp_safety_check(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = None
lowerCamelCase_ = 10
lowerCamelCase_ = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
lowerCamelCase_ = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.1_5, 0.3, 0.3, 0.2_5]] ) )
lowerCamelCase_ = FlaxTopPLogitsWarper(0.8 )
lowerCamelCase_ = np.exp(top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
lowerCamelCase_ = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.2_5]] )
self.assertTrue(np.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
# check edge cases with negative and extreme logits
lowerCamelCase_ = np.broadcast_to(np.arange(UpperCAmelCase )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
lowerCamelCase_ = ramp_logits[1] * 1_0_0.0
# make sure at least 2 tokens are kept
lowerCamelCase_ = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
lowerCamelCase_ = top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 20
lowerCamelCase_ = 4
lowerCamelCase_ = 0
lowerCamelCase_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCAmelCase )
# check that min length is applied at length 5
lowerCamelCase_ = ids_tensor((batch_size, 20) , vocab_size=20 )
lowerCamelCase_ = 5
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = min_dist_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] )
# check that min length is not applied anymore at length 15
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = 15
lowerCamelCase_ = min_dist_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertFalse(jnp.isinf(UpperCAmelCase ).any() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 20
lowerCamelCase_ = 4
lowerCamelCase_ = 0
lowerCamelCase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase )
# check that all scores are -inf except the bos_token_id score
lowerCamelCase_ = ids_tensor((batch_size, 1) , vocab_size=20 )
lowerCamelCase_ = 1
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
lowerCamelCase_ = 3
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertFalse(jnp.isinf(UpperCAmelCase ).any() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 20
lowerCamelCase_ = 4
lowerCamelCase_ = 0
lowerCamelCase_ = 5
lowerCamelCase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase , eos_token_id=UpperCAmelCase )
# check that all scores are -inf except the eos_token_id when max_length is reached
lowerCamelCase_ = ids_tensor((batch_size, 4) , vocab_size=20 )
lowerCamelCase_ = 4
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
lowerCamelCase_ = 3
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertFalse(jnp.isinf(UpperCAmelCase ).any() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 4
lowerCamelCase_ = 10
lowerCamelCase_ = 15
lowerCamelCase_ = 2
lowerCamelCase_ = 1
lowerCamelCase_ = 15
# dummy input_ids and scores
lowerCamelCase_ = ids_tensor((batch_size, sequence_length) , UpperCAmelCase )
lowerCamelCase_ = input_ids.copy()
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = scores.copy()
# instantiate all dist processors
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase_ = FlaxTopKLogitsWarper(3 )
lowerCamelCase_ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCamelCase_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCAmelCase )
lowerCamelCase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase )
lowerCamelCase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase , eos_token_id=UpperCAmelCase )
lowerCamelCase_ = 10
# no processor list
lowerCamelCase_ = temp_dist_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = top_k_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = min_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = bos_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = eos_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# with processor list
lowerCamelCase_ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCamelCase_ = processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 4
lowerCamelCase_ = 10
lowerCamelCase_ = 15
lowerCamelCase_ = 2
lowerCamelCase_ = 1
lowerCamelCase_ = 15
# dummy input_ids and scores
lowerCamelCase_ = ids_tensor((batch_size, sequence_length) , UpperCAmelCase )
lowerCamelCase_ = input_ids.copy()
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = scores.copy()
# instantiate all dist processors
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase_ = FlaxTopKLogitsWarper(3 )
lowerCamelCase_ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCamelCase_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCAmelCase )
lowerCamelCase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase )
lowerCamelCase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase , eos_token_id=UpperCAmelCase )
lowerCamelCase_ = 10
# no processor list
def run_no_processor_list(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = temp_dist_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = top_k_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = min_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = bos_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = eos_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
return scores
# with processor list
def run_processor_list(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCamelCase_ = processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
return scores
lowerCamelCase_ = jax.jit(UpperCAmelCase )
lowerCamelCase_ = jax.jit(UpperCAmelCase )
lowerCamelCase_ = jitted_run_no_processor_list(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = jitted_run_processor_list(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
| 29 | 0 |
"""simple docstring"""
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
__lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
class a_ :
UpperCamelCase_ : Tuple = None
@experimental
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
if ParallelBackendConfig.backend_name is None:
return _map_with_multiprocessing_pool(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
return _map_with_joblib(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = num_proc if num_proc <= len(lowerCAmelCase__ ) else len(lowerCAmelCase__ )
lowerCAmelCase__ = [] # We organize the splits ourselve (contiguous splits)
for index in range(lowerCAmelCase__ ):
lowerCAmelCase__ = len(lowerCAmelCase__ ) // num_proc
lowerCAmelCase__ = len(lowerCAmelCase__ ) % num_proc
lowerCAmelCase__ = div * index + min(lowerCAmelCase__ , lowerCAmelCase__ )
lowerCAmelCase__ = start + div + (1 if index < mod else 0)
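        # e.g. 10 objects over 3 processes -> contiguous slices [0:4], [4:7], [7:10] (sizes 4, 3, 3)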
split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc) )
if len(lowerCAmelCase__ ) != sum(len(i[1] ) for i in split_kwds ):
raise ValueError(
f"""Error dividing inputs iterable among processes. """
f"""Total number of objects {len(lowerCAmelCase__ )}, """
f"""length: {sum(len(i[1] ) for i in split_kwds )}""" )
logger.info(
f"""Spawning {num_proc} processes for {len(lowerCAmelCase__ )} objects in slices of {[len(i[1] ) for i in split_kwds]}""" )
lowerCAmelCase__ , lowerCAmelCase__ = None, None
if not disable_tqdm:
lowerCAmelCase__ , lowerCAmelCase__ = (RLock(),), tqdm.set_lock
with Pool(lowerCAmelCase__ , initargs=lowerCAmelCase__ , initializer=lowerCAmelCase__ ) as pool:
lowerCAmelCase__ = pool.map(lowerCAmelCase__ , lowerCAmelCase__ )
logger.info(f"""Finished {num_proc} processes""" )
lowerCAmelCase__ = [obj for proc_res in mapped for obj in proc_res]
logger.info(f"""Unpacked {len(lowerCAmelCase__ )} objects""" )
return mapped
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
import joblib
with joblib.parallel_backend(ParallelBackendConfig.backend_name , n_jobs=lowerCAmelCase__ ):
return joblib.Parallel()(
joblib.delayed(lowerCAmelCase__ )((function, obj, types, None, True, None) ) for obj in iterable )
@experimental
@contextlib.contextmanager
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = backend_name
if backend_name == "spark":
from joblibspark import register_spark
register_spark()
# TODO: call create_cache_and_write_probe if "download" in steps
# TODO: raise NotImplementedError when Dataset.map etc is called
try:
yield
finally:
lowerCAmelCase__ = None
| 644 |
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def lowercase ( lowerCAmelCase__ ):
def wrapper(*lowerCAmelCase__ ,**lowerCAmelCase__ ):
lowerCamelCase_ = timeit.default_timer()
lowerCamelCase_ = func(*lowerCAmelCase__ ,**lowerCAmelCase__ )
lowerCamelCase_ = timeit.default_timer() - starttime
return delta
lowerCamelCase_ = func.__name__
return wrapper
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=100 ,lowerCAmelCase__=None ):
lowerCamelCase_ = []
lowerCamelCase_ = seq_shapes or {}
for i in range(lowerCAmelCase__ ):
lowerCamelCase_ = {}
for col_id, (k, v) in enumerate(features.items() ):
if isinstance(lowerCAmelCase__ ,_ArrayXD ):
lowerCamelCase_ = np.random.rand(*v.shape ).astype(v.dtype )
elif isinstance(lowerCAmelCase__ ,datasets.Value ):
if v.dtype == "string":
lowerCamelCase_ = '''The small grey turtle was surprisingly fast when challenged.'''
else:
lowerCamelCase_ = np.random.randint(10 ,size=1 ).astype(v.dtype ).item()
elif isinstance(lowerCAmelCase__ ,datasets.Sequence ):
while isinstance(lowerCAmelCase__ ,datasets.Sequence ):
lowerCamelCase_ = v.feature
lowerCamelCase_ = seq_shapes[k]
lowerCamelCase_ = np.random.rand(*lowerCAmelCase__ ).astype(v.dtype )
lowerCamelCase_ = data
dummy_data.append((i, example) )
return dummy_data
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=100 ,lowerCAmelCase__=None ):
lowerCamelCase_ = generate_examples(lowerCAmelCase__ ,num_examples=lowerCAmelCase__ ,seq_shapes=lowerCAmelCase__ )
with ArrowWriter(features=lowerCAmelCase__ ,path=lowerCAmelCase__ ) as writer:
for key, record in dummy_data:
lowerCamelCase_ = features.encode_example(lowerCAmelCase__ )
writer.write(lowerCAmelCase__ )
lowerCamelCase_ , lowerCamelCase_ = writer.finalize()
if not num_final_examples == num_examples:
raise ValueError(
f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}." )
lowerCamelCase_ = datasets.Dataset.from_file(filename=lowerCAmelCase__ ,info=datasets.DatasetInfo(features=lowerCAmelCase__ ) )
return dataset
| 29 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_UpperCamelCase = {
"""configuration_graphormer""": ["""GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GraphormerConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
"""GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GraphormerForGraphClassification""",
"""GraphormerModel""",
"""GraphormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
_UpperCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 453 |
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
A_ = [
"""good first issue""",
"""feature request""",
"""wip""",
]
def lowercase ( ):
lowerCamelCase_ = Github(os.environ['''GITHUB_TOKEN'''] )
lowerCamelCase_ = g.get_repo('''huggingface/accelerate''' )
lowerCamelCase_ = repo.get_issues(state='''open''' )
for issue in open_issues:
        lowerCamelCase_ = sorted([comment for comment in issue.get_comments()] ,key=lambda i : i.created_at ,reverse=True )
lowerCamelCase_ = comments[0] if len(lowerCAmelCase__ ) > 0 else None
lowerCamelCase_ = dt.utcnow()
lowerCamelCase_ = (current_time - issue.updated_at).days
lowerCamelCase_ = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state='''closed''' )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
    lowercase()
| 29 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class _A ( unittest.TestCase ):
@slow
def UpperCAmelCase ( self ):
_UpperCAmelCase = TFXLMRobertaModel.from_pretrained("""jplu/tf-xlm-roberta-base""" )
_UpperCAmelCase = {
"""input_ids""": tf.convert_to_tensor([[0, 2646, 1_0269, 83, 9_9942, 2]] , dtype=tf.intaa ), # "My dog is cute"
"""attention_mask""": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.intaa ),
}
_UpperCAmelCase = model(_SCREAMING_SNAKE_CASE )["""last_hidden_state"""]
_UpperCAmelCase = tf.TensorShape((1, 6, 768) )
self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE )
# compare the actual values for a slice.
_UpperCAmelCase = tf.convert_to_tensor(
[
[
[0.068_1762, 0.1089_4451, 0.0677_2504],
[-0.0642_3668, 0.0236_6615, 0.0432_9344],
[-0.0605_7295, 0.0997_4135, -0.0007_0584],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) ) | 518 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''_float_tensor''',
'''decoder.output_projection.weight''',
]
for k in ignore_keys:
state_dict.pop(lowerCAmelCase__ ,lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ , lowerCamelCase_ = emb.weight.shape
lowerCamelCase_ = nn.Linear(lowerCAmelCase__ ,lowerCAmelCase__ ,bias=lowerCAmelCase__ )
lowerCamelCase_ = emb.weight.data
return lin_layer
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__="facebook/mbart-large-en-ro" ,lowerCAmelCase__=False ,lowerCAmelCase__=False ):
lowerCamelCase_ = torch.load(lowerCAmelCase__ ,map_location='''cpu''' )['''model''']
remove_ignore_keys_(lowerCAmelCase__ )
lowerCamelCase_ = state_dict['''encoder.embed_tokens.weight'''].shape[0]
lowerCamelCase_ = MBartConfig.from_pretrained(lowerCAmelCase__ ,vocab_size=lowerCAmelCase__ )
if mbart_aa and finetuned:
lowerCamelCase_ = '''relu'''
lowerCamelCase_ = state_dict['''decoder.embed_tokens.weight''']
lowerCamelCase_ = MBartForConditionalGeneration(lowerCAmelCase__ )
model.model.load_state_dict(lowerCAmelCase__ )
if finetuned:
lowerCamelCase_ = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""fairseq_path""", type=str, help="""bart.large, bart.large.cnn or a path to a model.pt on local filesystem."""
)
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--hf_config""",
default="""facebook/mbart-large-cc25""",
type=str,
help="""Which huggingface architecture to use: mbart-large""",
)
parser.add_argument("""--mbart_50""", action="""store_true""", help="""whether the model is mMART-50 checkpoint""")
parser.add_argument("""--finetuned""", action="""store_true""", help="""whether the model is a fine-tuned checkpoint""")
A_ = parser.parse_args()
A_ = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
| 29 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Dict = {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-openqa': (
        'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class A ( lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase : Dict = 'realm'
def __init__( self : Any , _UpperCamelCase : Optional[Any]=30_522 , _UpperCamelCase : List[str]=768 , _UpperCamelCase : Tuple=128 , _UpperCamelCase : Dict=12 , _UpperCamelCase : int=12 , _UpperCamelCase : List[str]=8 , _UpperCamelCase : List[str]=3_072 , _UpperCamelCase : List[str]="gelu_new" , _UpperCamelCase : List[str]=0.1 , _UpperCamelCase : Union[str, Any]=0.1 , _UpperCamelCase : Dict=512 , _UpperCamelCase : int=2 , _UpperCamelCase : List[str]=0.0_2 , _UpperCamelCase : str=1e-12 , _UpperCamelCase : Dict=256 , _UpperCamelCase : Any=10 , _UpperCamelCase : str=1e-3 , _UpperCamelCase : str=5 , _UpperCamelCase : Dict=320 , _UpperCamelCase : str=13_353_718 , _UpperCamelCase : Tuple=5_000 , _UpperCamelCase : Optional[Any]=1 , _UpperCamelCase : List[Any]=0 , _UpperCamelCase : int=2 , **_UpperCamelCase : Optional[Any] , ):
super().__init__(pad_token_id=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase)
# Common config
_lowercase: Optional[int] = vocab_size
_lowercase: Optional[Any] = max_position_embeddings
_lowercase: List[str] = hidden_size
_lowercase: Union[str, Any] = retriever_proj_size
_lowercase: List[Any] = num_hidden_layers
_lowercase: str = num_attention_heads
_lowercase: Any = num_candidates
_lowercase: List[Any] = intermediate_size
_lowercase: Any = hidden_act
_lowercase: Dict = hidden_dropout_prob
_lowercase: int = attention_probs_dropout_prob
_lowercase: List[Any] = initializer_range
_lowercase: int = type_vocab_size
_lowercase: Tuple = layer_norm_eps
# Reader config
_lowercase: Any = span_hidden_size
_lowercase: List[Any] = max_span_width
_lowercase: str = reader_layer_norm_eps
_lowercase: Optional[int] = reader_beam_size
_lowercase: Optional[int] = reader_seq_len
# Retrieval config
_lowercase: int = num_block_records
_lowercase: int = searcher_beam_size
| 226 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A_ = {
"""configuration_nllb_moe""": [
"""NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""NllbMoeConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"""NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NllbMoeForConditionalGeneration""",
"""NllbMoeModel""",
"""NllbMoePreTrainedModel""",
"""NllbMoeTop2Router""",
"""NllbMoeSparseMLP""",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
NllbMoeTopaRouter,
)
else:
import sys
A_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 29 | 0 |
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def lowerCAmelCase_ (lowercase__ : Optional[int] ) -> Dict:
'''simple docstring'''
if "img_encoder.pos_embed" in name:
lowerCAmelCase__ = name.replace('''img_encoder.pos_embed''' , '''vision_model.embeddings.position_embeddings''' )
if "img_encoder.patch_embed.proj" in name:
lowerCAmelCase__ = name.replace('''img_encoder.patch_embed.proj''' , '''vision_model.embeddings.patch_embeddings.projection''' )
if "img_encoder.patch_embed.norm" in name:
lowerCAmelCase__ = name.replace('''img_encoder.patch_embed.norm''' , '''vision_model.embeddings.layernorm''' )
if "img_encoder.layers" in name:
lowerCAmelCase__ = name.replace('''img_encoder.layers''' , '''vision_model.encoder.stages''' )
if "blocks" in name and "res" not in name:
lowerCAmelCase__ = name.replace('''blocks''' , '''layers''' )
if "attn" in name and "pre_assign" not in name:
lowerCAmelCase__ = name.replace('''attn''' , '''self_attn''' )
if "proj" in name and "self_attn" in name and "text" not in name:
lowerCAmelCase__ = name.replace('''proj''' , '''out_proj''' )
if "pre_assign_attn.attn.proj" in name:
lowerCAmelCase__ = name.replace('''pre_assign_attn.attn.proj''' , '''pre_assign_attn.attn.out_proj''' )
if "norm1" in name:
lowerCAmelCase__ = name.replace('''norm1''' , '''layer_norm1''' )
if "norm2" in name and "pre_assign" not in name:
lowerCAmelCase__ = name.replace('''norm2''' , '''layer_norm2''' )
if "img_encoder.norm" in name:
lowerCAmelCase__ = name.replace('''img_encoder.norm''' , '''vision_model.layernorm''' )
# text encoder
if "text_encoder.token_embedding" in name:
lowerCAmelCase__ = name.replace('''text_encoder.token_embedding''' , '''text_model.embeddings.token_embedding''' )
if "text_encoder.positional_embedding" in name:
lowerCAmelCase__ = name.replace('''text_encoder.positional_embedding''' , '''text_model.embeddings.position_embedding.weight''' )
if "text_encoder.transformer.resblocks." in name:
lowerCAmelCase__ = name.replace('''text_encoder.transformer.resblocks.''' , '''text_model.encoder.layers.''' )
if "ln_1" in name:
lowerCAmelCase__ = name.replace('''ln_1''' , '''layer_norm1''' )
if "ln_2" in name:
lowerCAmelCase__ = name.replace('''ln_2''' , '''layer_norm2''' )
if "c_fc" in name:
lowerCAmelCase__ = name.replace('''c_fc''' , '''fc1''' )
if "c_proj" in name:
lowerCAmelCase__ = name.replace('''c_proj''' , '''fc2''' )
if "text_encoder" in name:
lowerCAmelCase__ = name.replace('''text_encoder''' , '''text_model''' )
if "ln_final" in name:
lowerCAmelCase__ = name.replace('''ln_final''' , '''final_layer_norm''' )
# projection layers
if "img_projector.linear_hidden." in name:
lowerCAmelCase__ = name.replace('''img_projector.linear_hidden.''' , '''visual_projection.''' )
if "img_projector.linear_out." in name:
lowerCAmelCase__ = name.replace('''img_projector.linear_out.''' , '''visual_projection.3.''' )
if "text_projector.linear_hidden" in name:
lowerCAmelCase__ = name.replace('''text_projector.linear_hidden''' , '''text_projection''' )
if "text_projector.linear_out" in name:
lowerCAmelCase__ = name.replace('''text_projector.linear_out''' , '''text_projection.3''' )
return name
def lowerCAmelCase_ (lowercase__ : Union[str, Any] , lowercase__ : Dict ) -> Any:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
lowerCAmelCase__ = orig_state_dict.pop(lowerCAmelCase__ )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
lowerCAmelCase__ = key.split('''.''' )
lowerCAmelCase__ , lowerCAmelCase__ = int(key_split[2] ), int(key_split[4] )
lowerCAmelCase__ = config.vision_config.hidden_size
if "weight" in key:
lowerCAmelCase__ = val[:dim, :]
lowerCAmelCase__ = val[dim : dim * 2, :]
lowerCAmelCase__ = val[-dim:, :]
else:
lowerCAmelCase__ = val[:dim]
lowerCAmelCase__ = val[dim : dim * 2]
lowerCAmelCase__ = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
lowerCAmelCase__ = key.split('''.''' )
lowerCAmelCase__ = int(key_split[3] )
lowerCAmelCase__ = config.text_config.hidden_size
if "weight" in key:
lowerCAmelCase__ = val[:dim, :]
lowerCAmelCase__ = val[
dim : dim * 2, :
]
lowerCAmelCase__ = val[-dim:, :]
else:
lowerCAmelCase__ = val[:dim]
lowerCAmelCase__ = val[dim : dim * 2]
lowerCAmelCase__ = val[-dim:]
else:
lowerCAmelCase__ = rename_key(lowerCAmelCase__ )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
lowerCAmelCase__ = val.squeeze_()
else:
lowerCAmelCase__ = val
return orig_state_dict
def lowerCAmelCase_ () -> str:
'''simple docstring'''
lowerCAmelCase__ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCAmelCase__ = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw )
return im
@torch.no_grad()
def lowerCAmelCase_ (lowercase__ : Union[str, Any] , lowercase__ : Any , lowercase__ : Any="groupvit-gcc-yfcc" , lowercase__ : Optional[Any]=False ) -> str:
'''simple docstring'''
lowerCAmelCase__ = GroupViTConfig()
lowerCAmelCase__ = GroupViTModel(lowerCAmelCase__ ).eval()
lowerCAmelCase__ = torch.load(lowerCAmelCase__ , map_location='''cpu''' )['''model''']
lowerCAmelCase__ = convert_state_dict(lowerCAmelCase__ , lowerCAmelCase__ )
lowerCAmelCase__ , lowerCAmelCase__ = model.load_state_dict(lowerCAmelCase__ , strict=lowerCAmelCase__ )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(lowerCAmelCase__ ) == 0)
# verify result
lowerCAmelCase__ = CLIPProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
lowerCAmelCase__ = prepare_img()
lowerCAmelCase__ = processor(text=['''a photo of a cat''', '''a photo of a dog'''] , images=lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors='''pt''' )
with torch.no_grad():
lowerCAmelCase__ = model(**lowerCAmelCase__ )
if model_name == "groupvit-gcc-yfcc":
lowerCAmelCase__ = torch.tensor([[13.3523, 6.3629]] )
elif model_name == "groupvit-gcc-redcaps":
lowerCAmelCase__ = torch.tensor([[16.1873, 8.6230]] )
else:
raise ValueError(f'Model name {model_name} not supported.' )
assert torch.allclose(outputs.logits_per_image , lowerCAmelCase__ , atol=1e-3 )
processor.save_pretrained(lowerCAmelCase__ )
model.save_pretrained(lowerCAmelCase__ )
print('''Successfully saved processor and model to''' , lowerCAmelCase__ )
if push_to_hub:
print('''Pushing to the hub...''' )
processor.push_to_hub(lowerCAmelCase__ , organization='''nielsr''' )
model.push_to_hub(lowerCAmelCase__ , organization='''nielsr''' )
if __name__ == "__main__":
_UpperCAmelCase : Dict = argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model."
)
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint")
parser.add_argument(
"--model_name",
default="groupvit-gccy-fcc",
type=str,
help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.",
)
_UpperCAmelCase : Union[str, Any] = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 668 |
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class __lowerCamelCase ( lowerCAmelCase ):
a__: Any = (DDPMScheduler,)
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
lowerCamelCase_ = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**UpperCAmelCase )
return config
def UpperCAmelCase__ ( self ):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=UpperCAmelCase , beta_end=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
self.check_over_configs(thresholding=UpperCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=UpperCAmelCase , prediction_type=UpperCAmelCase , sample_max_value=UpperCAmelCase , )
def UpperCAmelCase__ ( self ):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for t in [0, 500, 999]:
self.check_over_forward(time_step=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
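        # For the default "fixed_small" variance type, DDPM uses
        # beta_tilde_t = (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) * beta_t,
        # which the three checks below probe at t = 0, 487 and 999.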
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1e-5
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = len(UpperCAmelCase )
lowerCamelCase_ = self.dummy_model()
lowerCamelCase_ = self.dummy_sample_deter
lowerCamelCase_ = torch.manual_seed(0 )
for t in reversed(range(UpperCAmelCase ) ):
# 1. predict noise residual
lowerCamelCase_ = model(UpperCAmelCase , UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
lowerCamelCase_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , generator=UpperCAmelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCamelCase_ = pred_prev_sample
lowerCamelCase_ = torch.sum(torch.abs(UpperCAmelCase ) )
lowerCamelCase_ = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config(prediction_type='''v_prediction''' )
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = len(UpperCAmelCase )
lowerCamelCase_ = self.dummy_model()
lowerCamelCase_ = self.dummy_sample_deter
lowerCamelCase_ = torch.manual_seed(0 )
for t in reversed(range(UpperCAmelCase ) ):
# 1. predict noise residual
lowerCamelCase_ = model(UpperCAmelCase , UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
lowerCamelCase_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , generator=UpperCAmelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCamelCase_ = pred_prev_sample
lowerCamelCase_ = torch.sum(torch.abs(UpperCAmelCase ) )
lowerCamelCase_ = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=UpperCAmelCase )
lowerCamelCase_ = scheduler.timesteps
for i, timestep in enumerate(UpperCAmelCase ):
if i == len(UpperCAmelCase ) - 1:
lowerCamelCase_ = -1
else:
lowerCamelCase_ = timesteps[i + 1]
lowerCamelCase_ = scheduler.previous_timestep(UpperCAmelCase )
lowerCamelCase_ = prev_t.item()
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = [100, 87, 50, 51, 0]
with self.assertRaises(UpperCAmelCase , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = [100, 87, 50, 1, 0]
lowerCamelCase_ = len(UpperCAmelCase )
with self.assertRaises(UpperCAmelCase , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=UpperCAmelCase , timesteps=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            UpperCAmelCase , msg=f'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=UpperCAmelCase )
| 29 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
a : Any = logging.get_logger(__name__)
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase=False, __UpperCAmelCase=False ) -> Optional[Any]:
'''simple docstring'''
snake_case_ = '''backbone.''' if is_semantic else ''''''
snake_case_ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"{prefix}blocks.{i}.norm1.weight", F"beit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm1.bias", F"beit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.weight", F"beit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.bias", F"beit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.weight", F"beit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.bias", F"beit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.weight", F"beit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.bias", F"beit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.weight", F"beit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.bias", F"beit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
(F"{prefix}cls_token", '''beit.embeddings.cls_token'''),
(F"{prefix}patch_embed.proj.weight", '''beit.embeddings.patch_embeddings.projection.weight'''),
(F"{prefix}patch_embed.proj.bias", '''beit.embeddings.patch_embeddings.projection.bias'''),
(F"{prefix}pos_embed", '''beit.embeddings.position_embeddings'''),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
('''mask_token''', '''beit.embeddings.mask_token'''),
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
('''fc_norm.weight''', '''beit.pooler.layernorm.weight'''),
('''fc_norm.bias''', '''beit.pooler.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase=False, __UpperCAmelCase=False ) -> Any:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
snake_case_ = '''backbone.''' if is_semantic else ''''''
# queries, keys and values
snake_case_ = state_dict.pop(F"{prefix}blocks.{i}.attn.qkv.weight" )
snake_case_ = state_dict.pop(F"{prefix}blocks.{i}.attn.q_bias" )
snake_case_ = state_dict.pop(F"{prefix}blocks.{i}.attn.v_bias" )
snake_case_ = in_proj_weight[
: config.hidden_size, :
]
snake_case_ = q_bias
snake_case_ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case_ = in_proj_weight[
-config.hidden_size :, :
]
snake_case_ = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
snake_case_ = state_dict.pop(F"{prefix}blocks.{i}.gamma_1" )
snake_case_ = state_dict.pop(F"{prefix}blocks.{i}.gamma_2" )
snake_case_ = gamma_a
snake_case_ = gamma_a
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
snake_case_ = dct.pop(lowerCAmelCase__ )
snake_case_ = val
def __magic_name__ ( ) -> Optional[Any]:
'''simple docstring'''
snake_case_ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
snake_case_ = Image.open(requests.get(lowerCAmelCase__, stream=lowerCAmelCase__ ).raw )
return im
@torch.no_grad()
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase=False ) -> List[Any]:
'''simple docstring'''
snake_case_ = False if '''rvlcdip''' in checkpoint_url else True
snake_case_ = BeitConfig(use_absolute_position_embeddings=lowerCAmelCase__, use_mask_token=lowerCAmelCase__ )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
snake_case_ = 1024
snake_case_ = 4096
snake_case_ = 24
snake_case_ = 16
# labels
if "rvlcdip" in checkpoint_url:
snake_case_ = 16
snake_case_ = '''huggingface/label-files'''
snake_case_ = '''rvlcdip-id2label.json'''
snake_case_ = json.load(open(hf_hub_download(lowerCAmelCase__, lowerCAmelCase__, repo_type='''dataset''' ), '''r''' ) )
snake_case_ = {int(lowerCAmelCase__ ): v for k, v in idalabel.items()}
snake_case_ = idalabel
snake_case_ = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
snake_case_ = torch.hub.load_state_dict_from_url(lowerCAmelCase__, map_location='''cpu''' )['''model''']
snake_case_ = create_rename_keys(lowerCAmelCase__, has_lm_head=lowerCAmelCase__ )
for src, dest in rename_keys:
rename_key(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ )
read_in_q_k_v(lowerCAmelCase__, lowerCAmelCase__, has_lm_head=lowerCAmelCase__ )
# load HuggingFace model
snake_case_ = BeitForMaskedImageModeling(lowerCAmelCase__ ) if has_lm_head else BeitForImageClassification(lowerCAmelCase__ )
model.eval()
model.load_state_dict(lowerCAmelCase__ )
# Check outputs on an image
snake_case_ = BeitImageProcessor(
size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=lowerCAmelCase__ )
snake_case_ = prepare_img()
snake_case_ = image_processor(images=lowerCAmelCase__, return_tensors='''pt''' )
snake_case_ = encoding['''pixel_values''']
snake_case_ = model(lowerCAmelCase__ )
snake_case_ = outputs.logits
# verify logits
snake_case_ = [1, 16] if '''rvlcdip''' in checkpoint_url else [1, 196, 8192]
assert logits.shape == torch.Size(lowerCAmelCase__ ), "Shape of logits not as expected"
Path(lowerCAmelCase__ ).mkdir(exist_ok=lowerCAmelCase__ )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(lowerCAmelCase__ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(lowerCAmelCase__ )
if push_to_hub:
if has_lm_head:
snake_case_ = '''dit-base''' if '''base''' in checkpoint_url else '''dit-large'''
else:
snake_case_ = '''dit-base-finetuned-rvlcdip''' if '''dit-b''' in checkpoint_url else '''dit-large-finetuned-rvlcdip'''
image_processor.push_to_hub(
repo_path_or_name=Path(lowerCAmelCase__, lowerCAmelCase__ ), organization='''nielsr''', commit_message='''Add image processor''', use_temp_dir=lowerCAmelCase__, )
model.push_to_hub(
repo_path_or_name=Path(lowerCAmelCase__, lowerCAmelCase__ ), organization='''nielsr''', commit_message='''Add model''', use_temp_dir=lowerCAmelCase__, )
if __name__ == "__main__":
a : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_url',
default='https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth',
type=str,
help='URL to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
a : Dict = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 640 |
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
A_ = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class __lowerCamelCase ( lowerCAmelCase ):
a__: bool = field(default=lowerCAmelCase , metadata={'help': 'Whether to use SortishSampler or not.'} )
a__: bool = field(
default=lowerCAmelCase , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
a__: Optional[int] = field(
default=lowerCAmelCase , metadata={
'help': (
'The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '
'to the `max_length` value of the model configuration.'
)
} , )
a__: Optional[int] = field(
default=lowerCAmelCase , metadata={
'help': (
'The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '
'to the `num_beams` value of the model configuration.'
)
} , )
a__: Optional[Union[str, Path, GenerationConfig]] = field(
default=lowerCAmelCase , metadata={
'help': 'Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'
} , )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = super().to_dict()
for k, v in d.items():
if isinstance(UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = v.to_dict()
return d
| 29 | 0 |
'''simple docstring'''
from __future__ import annotations
from math import pow, sqrt
def __lowercase ( resistance , reactance , impedance ) -> dict[str, float]:
'''simple docstring'''
if (resistance, reactance, impedance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if resistance == 0:
return {"resistance": sqrt(pow(lowerCAmelCase__ , 2 ) - pow(lowerCAmelCase__ , 2 ) )}
elif reactance == 0:
return {"reactance": sqrt(pow(lowerCAmelCase__ , 2 ) - pow(lowerCAmelCase__ , 2 ) )}
elif impedance == 0:
return {"impedance": sqrt(pow(lowerCAmelCase__ , 2 ) + pow(lowerCAmelCase__ , 2 ) )}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
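    # Hedged usage sketch (assumes the (resistance, reactance, impedance) signature
    # above): exactly one argument is 0 and the function solves |Z|^2 = R^2 + X^2
    # for the missing quantity.
    print(__lowercase(3, 4, 0))  # expected: {'impedance': 5.0}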
| 330 |
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
A_ = True
except ImportError:
A_ = False
try:
from torch.hub import _get_torch_home
A_ = _get_torch_home()
except ImportError:
A_ = os.path.expanduser(
os.getenv("""TORCH_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """torch"""))
)
A_ = os.path.join(torch_cache_home, """transformers""")
A_ = """https://cdn.huggingface.co"""
A_ = """https://s3.amazonaws.com/models.huggingface.co/bert"""
A_ = """/""".join(str(Path(__file__).resolve()).split("""/""")[:-1])
A_ = os.path.join(PATH, """config.yaml""")
A_ = os.path.join(PATH, """attributes.txt""")
A_ = os.path.join(PATH, """objects.txt""")
A_ = os.getenv("""PYTORCH_PRETRAINED_BERT_CACHE""", default_cache_path)
A_ = os.getenv("""PYTORCH_TRANSFORMERS_CACHE""", PYTORCH_PRETRAINED_BERT_CACHE)
A_ = os.getenv("""TRANSFORMERS_CACHE""", PYTORCH_TRANSFORMERS_CACHE)
A_ = """pytorch_model.bin"""
A_ = """config.yaml"""
def lowercase ( lowerCAmelCase__=OBJECTS ,lowerCAmelCase__=ATTRIBUTES ):
lowerCamelCase_ = []
with open(lowerCAmelCase__ ) as f:
for object in f.readlines():
vg_classes.append(object.split(''',''' )[0].lower().strip() )
lowerCamelCase_ = []
with open(lowerCAmelCase__ ) as f:
for object in f.readlines():
vg_attrs.append(object.split(''',''' )[0].lower().strip() )
return vg_classes, vg_attrs
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = OrderedDict()
with open(lowerCAmelCase__ ,'''rb''' ) as f:
lowerCamelCase_ = pkl.load(lowerCAmelCase__ )['''model''']
for k in copy.deepcopy(list(ckp.keys() ) ):
lowerCamelCase_ = ckp.pop(lowerCAmelCase__ )
if isinstance(lowerCAmelCase__ ,np.ndarray ):
lowerCamelCase_ = torch.tensor(lowerCAmelCase__ )
else:
assert isinstance(lowerCAmelCase__ ,torch.tensor ), type(lowerCAmelCase__ )
lowerCamelCase_ = v
return r
class __lowerCamelCase :
a__: Union[str, Any] = {}
def __init__( self , UpperCAmelCase , UpperCAmelCase = "root" , UpperCAmelCase=0 ):
lowerCamelCase_ = name
lowerCamelCase_ = level
lowerCamelCase_ = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
lowerCamelCase_ = copy.deepcopy(UpperCAmelCase )
lowerCamelCase_ = copy.deepcopy(UpperCAmelCase )
if isinstance(UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = Config(UpperCAmelCase , name=UpperCAmelCase , level=level + 1 )
lowerCamelCase_ = v
setattr(self , UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = d
def __repr__( self ):
return str(list((self._pointer.keys()) ) )
def __setattr__( self , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = val
lowerCamelCase_ = val
lowerCamelCase_ = key.split('''.''' )
lowerCamelCase_ = len(UpperCAmelCase ) - 1
lowerCamelCase_ = self._pointer
if len(UpperCAmelCase ) > 1:
for i, l in enumerate(UpperCAmelCase ):
if hasattr(self , UpperCAmelCase ) and isinstance(getattr(self , UpperCAmelCase ) , UpperCAmelCase ):
setattr(getattr(self , UpperCAmelCase ) , '''.'''.join(levels[i:] ) , UpperCAmelCase )
if l == last_level:
lowerCamelCase_ = val
else:
lowerCamelCase_ = pointer[l]
def UpperCAmelCase__ ( self ):
return self._pointer
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase ):
with open(f"{file_name}" , '''w''' ) as stream:
dump(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase ):
with open(f"{file_name}" , '''w''' ) as stream:
json.dump(UpperCAmelCase , UpperCAmelCase )
@staticmethod
def UpperCAmelCase__ ( UpperCAmelCase ):
with open(UpperCAmelCase ) as stream:
lowerCamelCase_ = load(UpperCAmelCase , Loader=UpperCAmelCase )
return data
def __str__( self ):
lowerCamelCase_ = ''' '''
if self._name != "root":
lowerCamelCase_ = f"{t * (self._level-1)}{self._name}:\n"
else:
lowerCamelCase_ = ''''''
lowerCamelCase_ = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(UpperCAmelCase , UpperCAmelCase ):
r += f"{t * (self._level)}{v}\n"
self._level += 1
else:
r += f"{t * (self._level)}{k}: {v} ({type(UpperCAmelCase ).__name__})\n"
lowerCamelCase_ = level
return r[:-1]
@classmethod
def UpperCAmelCase__ ( cls , UpperCAmelCase , **UpperCAmelCase ):
lowerCamelCase_ , lowerCamelCase_ = cls.get_config_dict(UpperCAmelCase , **UpperCAmelCase )
return cls(UpperCAmelCase )
@classmethod
def UpperCAmelCase__ ( cls , UpperCAmelCase , **UpperCAmelCase ):
lowerCamelCase_ = kwargs.pop('''cache_dir''' , UpperCAmelCase )
lowerCamelCase_ = kwargs.pop('''force_download''' , UpperCAmelCase )
lowerCamelCase_ = kwargs.pop('''resume_download''' , UpperCAmelCase )
lowerCamelCase_ = kwargs.pop('''proxies''' , UpperCAmelCase )
lowerCamelCase_ = kwargs.pop('''local_files_only''' , UpperCAmelCase )
if os.path.isdir(UpperCAmelCase ):
lowerCamelCase_ = os.path.join(UpperCAmelCase , UpperCAmelCase )
elif os.path.isfile(UpperCAmelCase ) or is_remote_url(UpperCAmelCase ):
lowerCamelCase_ = pretrained_model_name_or_path
else:
lowerCamelCase_ = hf_bucket_url(UpperCAmelCase , filename=UpperCAmelCase , use_cdn=UpperCAmelCase )
try:
# Load from URL or cache if already cached
lowerCamelCase_ = cached_path(
UpperCAmelCase , cache_dir=UpperCAmelCase , force_download=UpperCAmelCase , proxies=UpperCAmelCase , resume_download=UpperCAmelCase , local_files_only=UpperCAmelCase , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
lowerCamelCase_ = Config.load_yaml(UpperCAmelCase )
except EnvironmentError:
lowerCamelCase_ = '''Can\'t load config for'''
raise EnvironmentError(UpperCAmelCase )
if resolved_config_file == config_file:
print('''loading configuration file from path''' )
else:
print('''loading configuration file cache''' )
return Config.load_yaml(UpperCAmelCase ), kwargs
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = torch.load('''dump.pt''' ,map_location=in_tensor.device )
lowerCamelCase_ = in_tensor.numpy()
lowerCamelCase_ = out_tensor.numpy()[0]
print(na.shape ,na[0, 0, :5] )
print(na.shape ,na[0, 0, :5] )
assert np.allclose(lowerCAmelCase__ ,lowerCAmelCase__ ,rtol=0.01 ,atol=0.1 ), (
f"{sum([1 for x in np.isclose(lowerCAmelCase__ ,lowerCAmelCase__ ,rtol=0.01 ,atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %"
" element-wise mismatch"
)
raise Exception('''tensors are all good''' )
# Hugging face functions below
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = urlparse(lowerCAmelCase__ )
return parsed.scheme in ("http", "https")
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=True ):
lowerCamelCase_ = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
lowerCamelCase_ = '''/''' not in model_id
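    # e.g. a flat id like "bert-base-uncased" maps to "<endpoint>/bert-base-uncased-<filename>",
    # while a namespaced id like "user/model" maps to "<endpoint>/user/model/<filename>".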
if legacy_format:
return f"{endpoint}/{model_id}-{filename}"
else:
return f"{endpoint}/{model_id}/{filename}"
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=None ,lowerCAmelCase__=0 ,lowerCAmelCase__=None ,):
lowerCamelCase_ = '''python/{}'''.format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
ua += "; " + "; ".join('''{}/{}'''.format(lowerCAmelCase__ ,lowerCAmelCase__ ) for k, v in user_agent.items() )
elif isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
ua += "; " + user_agent
lowerCamelCase_ = {'''user-agent''': ua}
if resume_size > 0:
lowerCamelCase_ = '''bytes=%d-''' % (resume_size,)
lowerCamelCase_ = requests.get(lowerCAmelCase__ ,stream=lowerCAmelCase__ ,proxies=lowerCAmelCase__ ,headers=lowerCAmelCase__ )
if response.status_code == 416: # Range not satisfiable
return
lowerCamelCase_ = response.headers.get('''Content-Length''' )
lowerCamelCase_ = resume_size + int(lowerCAmelCase__ ) if content_length is not None else None
lowerCamelCase_ = tqdm(
unit='''B''' ,unit_scale=lowerCAmelCase__ ,total=lowerCAmelCase__ ,initial=lowerCAmelCase__ ,desc='''Downloading''' ,)
for chunk in response.iter_content(chunk_size=1_024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(lowerCAmelCase__ ) )
temp_file.write(lowerCAmelCase__ )
progress.close()
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,lowerCAmelCase__=None ,lowerCAmelCase__=10 ,lowerCAmelCase__=False ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,):
if cache_dir is None:
lowerCamelCase_ = TRANSFORMERS_CACHE
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = str(lowerCAmelCase__ )
os.makedirs(lowerCAmelCase__ ,exist_ok=lowerCAmelCase__ )
lowerCamelCase_ = None
if not local_files_only:
try:
lowerCamelCase_ = requests.head(lowerCAmelCase__ ,allow_redirects=lowerCAmelCase__ ,proxies=lowerCAmelCase__ ,timeout=lowerCAmelCase__ )
if response.status_code == 200:
lowerCamelCase_ = response.headers.get('''ETag''' )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
lowerCamelCase_ = url_to_filename(lowerCAmelCase__ ,lowerCAmelCase__ )
# get cache path to put the file
lowerCamelCase_ = os.path.join(lowerCAmelCase__ ,lowerCAmelCase__ )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(lowerCAmelCase__ ):
return cache_path
else:
lowerCamelCase_ = [
file
for file in fnmatch.filter(os.listdir(lowerCAmelCase__ ) ,filename + '''.*''' )
if not file.endswith('''.json''' ) and not file.endswith('''.lock''' )
]
if len(lowerCAmelCase__ ) > 0:
return os.path.join(lowerCAmelCase__ ,matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
'''Cannot find the requested files in the cached path and outgoing traffic has been'''
''' disabled. To enable model look-ups and downloads online, set \'local_files_only\''''
''' to False.''' )
return None
# From now on, etag is not None.
if os.path.exists(lowerCAmelCase__ ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
lowerCamelCase_ = cache_path + '''.lock'''
with FileLock(lowerCAmelCase__ ):
# If the download just completed while the lock was activated.
if os.path.exists(lowerCAmelCase__ ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
lowerCamelCase_ = cache_path + '''.incomplete'''
@contextmanager
def _resumable_file_manager():
with open(lowerCAmelCase__ ,'''a+b''' ) as f:
yield f
lowerCamelCase_ = _resumable_file_manager
if os.path.exists(lowerCAmelCase__ ):
lowerCamelCase_ = os.stat(lowerCAmelCase__ ).st_size
else:
lowerCamelCase_ = 0
else:
lowerCamelCase_ = partial(tempfile.NamedTemporaryFile ,dir=lowerCAmelCase__ ,delete=lowerCAmelCase__ )
lowerCamelCase_ = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
'''%s not found in cache or force_download set to True, downloading to %s''' ,lowerCAmelCase__ ,temp_file.name ,)
http_get(
lowerCAmelCase__ ,lowerCAmelCase__ ,proxies=lowerCAmelCase__ ,resume_size=lowerCAmelCase__ ,user_agent=lowerCAmelCase__ ,)
os.replace(temp_file.name ,lowerCAmelCase__ )
lowerCamelCase_ = {'''url''': url, '''etag''': etag}
lowerCamelCase_ = cache_path + '''.json'''
with open(lowerCAmelCase__ ,'''w''' ) as meta_file:
json.dump(lowerCAmelCase__ ,lowerCAmelCase__ )
return cache_path
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=None ):
lowerCamelCase_ = url.encode('''utf-8''' )
lowerCamelCase_ = shaaaa(lowerCAmelCase__ )
lowerCamelCase_ = url_hash.hexdigest()
if etag:
lowerCamelCase_ = etag.encode('''utf-8''' )
lowerCamelCase_ = shaaaa(lowerCAmelCase__ )
filename += "." + etag_hash.hexdigest()
if url.endswith('''.h5''' ):
filename += ".h5"
return filename
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,lowerCAmelCase__=False ,lowerCAmelCase__=False ,):
if cache_dir is None:
lowerCamelCase_ = TRANSFORMERS_CACHE
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = str(lowerCAmelCase__ )
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = str(lowerCAmelCase__ )
if is_remote_url(lowerCAmelCase__ ):
# URL, so get it from the cache (downloading if necessary)
lowerCamelCase_ = get_from_cache(
lowerCAmelCase__ ,cache_dir=lowerCAmelCase__ ,force_download=lowerCAmelCase__ ,proxies=lowerCAmelCase__ ,resume_download=lowerCAmelCase__ ,user_agent=lowerCAmelCase__ ,local_files_only=lowerCAmelCase__ ,)
elif os.path.exists(lowerCAmelCase__ ):
# File, and it exists.
lowerCamelCase_ = url_or_filename
elif urlparse(lowerCAmelCase__ ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError('''file {} not found'''.format(lowerCAmelCase__ ) )
else:
# Something unknown
raise ValueError('''unable to parse {} as a URL or as a local path'''.format(lowerCAmelCase__ ) )
if extract_compressed_file:
if not is_zipfile(lowerCAmelCase__ ) and not tarfile.is_tarfile(lowerCAmelCase__ ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
lowerCamelCase_ , lowerCamelCase_ = os.path.split(lowerCAmelCase__ )
lowerCamelCase_ = output_file.replace('''.''' ,'''-''' ) + '''-extracted'''
lowerCamelCase_ = os.path.join(lowerCAmelCase__ ,lowerCAmelCase__ )
if os.path.isdir(lowerCAmelCase__ ) and os.listdir(lowerCAmelCase__ ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
lowerCamelCase_ = output_path + '''.lock'''
with FileLock(lowerCAmelCase__ ):
shutil.rmtree(lowerCAmelCase__ ,ignore_errors=lowerCAmelCase__ )
os.makedirs(lowerCAmelCase__ )
if is_zipfile(lowerCAmelCase__ ):
with ZipFile(lowerCAmelCase__ ,'''r''' ) as zip_file:
zip_file.extractall(lowerCAmelCase__ )
zip_file.close()
elif tarfile.is_tarfile(lowerCAmelCase__ ):
lowerCamelCase_ = tarfile.open(lowerCAmelCase__ )
tar_file.extractall(lowerCAmelCase__ )
tar_file.close()
else:
raise EnvironmentError('''Archive format of {} could not be identified'''.format(lowerCAmelCase__ ) )
return output_path_extracted
return output_path
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__="," ):
assert isinstance(lowerCAmelCase__ ,lowerCAmelCase__ )
if os.path.isfile(lowerCAmelCase__ ):
with open(lowerCAmelCase__ ) as f:
lowerCamelCase_ = eval(f.read() )
else:
lowerCamelCase_ = requests.get(lowerCAmelCase__ )
try:
lowerCamelCase_ = requests.json()
except Exception:
lowerCamelCase_ = req.content.decode()
assert data is not None, "could not connect"
try:
lowerCamelCase_ = eval(lowerCAmelCase__ )
except Exception:
lowerCamelCase_ = data.split('''\n''' )
req.close()
return data
def lowercase ( lowerCAmelCase__ ):
    response = requests.get(lowerCAmelCase__ )
    img = np.array(Image.open(BytesIO(response.content ) ) )
return img
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = url.split('''/''' )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(lowerCAmelCase__ )
with open(lowerCAmelCase__ ,'''rb''' ) as stream:
lowerCamelCase_ = pkl.load(lowerCAmelCase__ )
lowerCamelCase_ = weights.pop('''model''' )
lowerCamelCase_ = {}
for k, v in model.items():
lowerCamelCase_ = torch.from_numpy(lowerCAmelCase__ )
if "running_var" in k:
lowerCamelCase_ = torch.tensor([0] )
lowerCamelCase_ = k.replace('''running_var''' ,'''num_batches_tracked''' )
lowerCamelCase_ = zero
return new
def lowercase ( ):
print(f"{os.path.abspath(os.path.join(lowerCAmelCase__ ,os.pardir ) )}/demo.ipynb" )
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__="RGB" ):
assert isinstance(lowerCAmelCase__ ,lowerCAmelCase__ )
if os.path.isfile(lowerCAmelCase__ ):
lowerCamelCase_ = cva.imread(lowerCAmelCase__ )
else:
lowerCamelCase_ = get_image_from_url(lowerCAmelCase__ )
assert img is not None, f"could not connect to: {im}"
lowerCamelCase_ = cva.cvtColor(lowerCAmelCase__ ,cva.COLOR_BGR2RGB )
if input_format == "RGB":
lowerCamelCase_ = img[:, :, ::-1]
return img
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=1 ):
return (images[i : i + batch] for i in range(0 ,len(lowerCAmelCase__ ) ,lowerCAmelCase__ ))
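# Illustrative note (not part of the original file): the generator above yields
# successive slices of `images`; for example, with batch=2 and a list of six
# images it produces three chunks of two images each.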
| 29 | 0 |
def _lowerCAmelCase ( UpperCamelCase__: List[str] , UpperCamelCase__: Any , UpperCamelCase__: List[str] ) -> int:
"""simple docstring"""
if len(lowerCAmelCase__ ) != len(lowerCAmelCase__ ):
raise ValueError("""The length of profit and weight must be same.""" )
if max_weight <= 0:
raise ValueError("""max_weight must greater than zero.""" )
if any(p < 0 for p in profit ):
raise ValueError("""Profit can not be negative.""" )
if any(w < 0 for w in weight ):
raise ValueError("""Weight can not be negative.""" )
# List created to store profit gained for the 1kg in case of each weight
# respectively. Calculate and append profit/weight for each element.
A = [p / w for p, w in zip(lowerCAmelCase__ , lowerCAmelCase__ )]
# Creating a copy of the list and sorting profit/weight in ascending order
A = sorted(lowerCAmelCase__ )
# declaring useful variables
A = len(lowerCAmelCase__ )
A = 0
A = 0
A = 0
    # loop while the accumulated weight stays within the max limit (e.g. 15 kg) and i < length
while limit <= max_weight and i < length:
# flag value for encountered greatest element in sorted_profit_by_weight
A = sorted_profit_by_weight[length - i - 1]
A = profit_by_weight.index(lowerCAmelCase__ )
A = -1
        # check if the item's weight fits within the remaining capacity
if max_weight - limit >= weight[index]:
limit += weight[index]
            # Add the full profit for this item, since weight[index] / weight[index] == 1
gain += 1 * profit[index]
else:
            # The item's weight exceeds the remaining capacity, so take only the
            # remaining kgs and add the proportional profit:
            # (remaining weight / weight[index]) * profit[index]
gain += (max_weight - limit) / weight[index] * profit[index]
break
i += 1
return gain
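# Illustrative worked example (not part of the original script; numbers chosen
# only for demonstration): with profit = [10, 20, 30], weight = [3, 4, 5] and
# max_weight = 6, the greedy pass first takes the whole item with the best
# profit/weight ratio (30 / 5 = 6.0), using 5 kg, then fills the remaining 1 kg
# with a quarter of the next best item (20 / 4 = 5.0), so the function returns
# 30 + (1 / 4) * 20 = 35.0, i.e. calc_profit([10, 20, 30], [3, 4, 5], 6) == 35.0.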
if __name__ == "__main__":
print(
"Input profits, weights, and then max_weight (all positive ints) separated by "
"spaces."
)
_lowercase : int = [int(x) for x in input("Input profits separated by spaces: ").split()]
_lowercase : Union[str, Any] = [int(x) for x in input("Input weights separated by spaces: ").split()]
_lowercase : int = int(input("Max weight allowed: "))
# Function Call
calc_profit(profit, weight, max_weight)
| 641 |
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.17.0.dev0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
A_ = logging.getLogger(__name__)
@dataclass
class __lowerCamelCase :
a__: Optional[str] = field(
default='tab_fact' , metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
a__: Optional[str] = field(
default='tab_fact' , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} , )
a__: int = field(
default=1024 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
a__: bool = field(
default=lowerCAmelCase , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
a__: bool = field(
default=lowerCAmelCase , metadata={
'help': (
'Whether to pad all samples to `max_seq_length`. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
)
} , )
a__: Optional[int] = field(
default=lowerCAmelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
a__: Optional[int] = field(
default=lowerCAmelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
a__: Optional[int] = field(
default=lowerCAmelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of prediction examples to this '
'value if set.'
)
} , )
a__: Optional[str] = field(
default=lowerCAmelCase , metadata={'help': 'A csv or a json file containing the training data.'} )
a__: Optional[str] = field(
default=lowerCAmelCase , metadata={'help': 'A csv or a json file containing the validation data.'} )
a__: Optional[str] = field(default=lowerCAmelCase , metadata={'help': 'A csv or a json file containing the test data.'} )
def UpperCAmelCase__ ( self ):
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError('''Need either a GLUE task, a training/validation file or a dataset name.''' )
else:
lowerCamelCase_ = self.train_file.split('''.''' )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
lowerCamelCase_ = self.validation_file.split('''.''' )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class __lowerCamelCase :
a__: str = field(
default=lowerCAmelCase , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
a__: Optional[str] = field(
default=lowerCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
a__: Optional[str] = field(
default=lowerCAmelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
a__: Optional[str] = field(
default=lowerCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
a__: bool = field(
default=lowerCAmelCase , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
a__: str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
a__: bool = field(
default=lowerCAmelCase , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
def lowercase ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowerCamelCase_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' ,datefmt='''%m/%d/%Y %H:%M:%S''' ,handlers=[logging.StreamHandler(sys.stdout )] ,)
lowerCamelCase_ = training_args.get_process_log_level()
logger.setLevel(lowerCAmelCase__ )
datasets.utils.logging.set_verbosity(lowerCAmelCase__ )
transformers.utils.logging.set_verbosity(lowerCAmelCase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(f"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
lowerCamelCase_ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCamelCase_ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
lowerCamelCase_ = load_dataset(
data_args.dataset_name ,data_args.dataset_config_name ,cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
lowerCamelCase_ = {'''train''': data_args.train_file, '''validation''': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
lowerCamelCase_ = data_args.train_file.split('''.''' )[-1]
lowerCamelCase_ = data_args.test_file.split('''.''' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
lowerCamelCase_ = data_args.test_file
else:
raise ValueError('''Need either a GLUE task or a test file for `do_predict`.''' )
for key in data_files.keys():
logger.info(f"load a local file for {key}: {data_files[key]}" )
if data_args.train_file.endswith('''.csv''' ):
# Loading a dataset from local csv files
lowerCamelCase_ = load_dataset('''csv''' ,data_files=lowerCAmelCase__ ,cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
lowerCamelCase_ = load_dataset('''json''' ,data_files=lowerCAmelCase__ ,cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
lowerCamelCase_ = raw_datasets['''train'''].features['''label'''].names
lowerCamelCase_ = len(lowerCAmelCase__ )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCamelCase_ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path ,num_labels=lowerCAmelCase__ ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
# load tapex tokenizer
lowerCamelCase_ = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,use_fast=model_args.use_fast_tokenizer ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,add_prefix_space=lowerCAmelCase__ ,)
lowerCamelCase_ = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path ,from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) ,config=lowerCAmelCase__ ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
# Padding strategy
if data_args.pad_to_max_length:
lowerCamelCase_ = '''max_length'''
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
lowerCamelCase_ = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
lowerCamelCase_ = {'''Refused''': 0, '''Entailed''': 1}
lowerCamelCase_ = {0: '''Refused''', 1: '''Entailed'''}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
lowerCamelCase_ = min(data_args.max_seq_length ,tokenizer.model_max_length )
def preprocess_tabfact_function(lowerCAmelCase__ ):
# Tokenize the texts
def _convert_table_text_to_pandas(lowerCAmelCase__ ):
lowerCamelCase_ = [_table_row.split('''#''' ) for _table_row in _table_text.strip('''\n''' ).split('''\n''' )]
lowerCamelCase_ = pd.DataFrame.from_records(_table_content[1:] ,columns=_table_content[0] )
return _table_pd
lowerCamelCase_ = examples['''statement''']
lowerCamelCase_ = list(map(_convert_table_text_to_pandas ,examples['''table_text'''] ) )
lowerCamelCase_ = tokenizer(lowerCAmelCase__ ,lowerCAmelCase__ ,padding=lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,truncation=lowerCAmelCase__ )
lowerCamelCase_ = examples['''label''']
return result
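    # Illustrative note (example values are assumptions based on the parsing
    # above, not taken from the dataset): `table_text` is expected to be a flat
    # string in which rows are separated by "\n" and cells by "#", with the
    # first row holding the column names, e.g.
    #   "city#population\nparis#2.1m\nberlin#3.6m"
    # becomes a two-column pandas DataFrame with columns ["city", "population"].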
with training_args.main_process_first(desc='''dataset map pre-processing''' ):
lowerCamelCase_ = raw_datasets.map(
lowerCAmelCase__ ,batched=lowerCAmelCase__ ,load_from_cache_file=not data_args.overwrite_cache ,desc='''Running tokenizer on dataset''' ,)
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
lowerCamelCase_ = raw_datasets['''train''']
if data_args.max_train_samples is not None:
lowerCamelCase_ = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
lowerCamelCase_ = raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
lowerCamelCase_ = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('''--do_predict requires a test dataset''' )
lowerCamelCase_ = raw_datasets['''test''']
if data_args.max_predict_samples is not None:
lowerCamelCase_ = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(lowerCAmelCase__ ) ) ,3 ):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}." )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(lowerCAmelCase__ ):
lowerCamelCase_ = p.predictions[0] if isinstance(p.predictions ,lowerCAmelCase__ ) else p.predictions
lowerCamelCase_ = np.argmax(lowerCAmelCase__ ,axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
lowerCamelCase_ = default_data_collator
elif training_args.fpaa:
lowerCamelCase_ = DataCollatorWithPadding(lowerCAmelCase__ ,pad_to_multiple_of=8 )
else:
lowerCamelCase_ = None
# Initialize our Trainer
lowerCamelCase_ = Trainer(
model=lowerCAmelCase__ ,args=lowerCAmelCase__ ,train_dataset=train_dataset if training_args.do_train else None ,eval_dataset=eval_dataset if training_args.do_eval else None ,compute_metrics=lowerCAmelCase__ ,tokenizer=lowerCAmelCase__ ,data_collator=lowerCAmelCase__ ,)
# Training
if training_args.do_train:
lowerCamelCase_ = None
if training_args.resume_from_checkpoint is not None:
lowerCamelCase_ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCamelCase_ = last_checkpoint
lowerCamelCase_ = trainer.train(resume_from_checkpoint=lowerCAmelCase__ )
lowerCamelCase_ = train_result.metrics
lowerCamelCase_ = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCAmelCase__ )
)
lowerCamelCase_ = min(lowerCAmelCase__ ,len(lowerCAmelCase__ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('''train''' ,lowerCAmelCase__ )
trainer.save_metrics('''train''' ,lowerCAmelCase__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowerCamelCase_ = trainer.evaluate(eval_dataset=lowerCAmelCase__ )
lowerCamelCase_ = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCAmelCase__ )
lowerCamelCase_ = min(lowerCAmelCase__ ,len(lowerCAmelCase__ ) )
trainer.log_metrics('''eval''' ,lowerCAmelCase__ )
trainer.save_metrics('''eval''' ,lowerCAmelCase__ )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
        # Remove the `label` column because it contains -1 and the Trainer won't like that.
lowerCamelCase_ = predict_dataset.remove_columns('''label''' )
lowerCamelCase_ = trainer.predict(lowerCAmelCase__ ,metric_key_prefix='''predict''' ).predictions
lowerCamelCase_ = np.argmax(lowerCAmelCase__ ,axis=1 )
lowerCamelCase_ = os.path.join(training_args.output_dir ,'''predict_results_tabfact.txt''' )
if trainer.is_world_process_zero():
with open(lowerCAmelCase__ ,'''w''' ) as writer:
logger.info('''***** Predict Results *****''' )
writer.write('''index\tprediction\n''' )
for index, item in enumerate(lowerCAmelCase__ ):
lowerCamelCase_ = label_list[item]
writer.write(f"{index}\t{item}\n" )
lowerCamelCase_ = {'''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''text-classification'''}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCAmelCase__ )
else:
trainer.create_model_card(**lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 29 | 0 |
"""simple docstring"""
def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Optional[int]:
if not grid or not grid[0]:
raise TypeError("""The grid does not contain the appropriate information""" )
for cell_n in range(1 , len(grid[0] ) ):
grid[0][cell_n] += grid[0][cell_n - 1]
_SCREAMING_SNAKE_CASE : Optional[int] = grid[0]
for row_n in range(1 , len(lowerCAmelCase__ ) ):
_SCREAMING_SNAKE_CASE : Dict = grid[row_n]
_SCREAMING_SNAKE_CASE : Dict = fill_row(lowerCAmelCase__ , lowerCAmelCase__ )
_SCREAMING_SNAKE_CASE : str = grid[row_n]
return grid[-1][-1]
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[int]:
current_row[0] += row_above[0]
for cell_n in range(1 , len(lowerCAmelCase__ ) ):
current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )
return current_row
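# Illustrative worked example (not part of the original module): for the grid
# [[1, 3, 1], [1, 5, 1], [4, 2, 1]] the first row accumulates to [1, 4, 5],
# the second row becomes [2, 7, 6] and the last row becomes [6, 8, 7], so the
# minimum path sum from the top-left to the bottom-right corner is 7.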
if __name__ == "__main__":
import doctest
doctest.testmod()
| 338 |
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class __lowerCamelCase ( unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' )
lowerCamelCase_ = AutoTokenizer.from_pretrained('''google/mt5-small''' )
lowerCamelCase_ = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids
lowerCamelCase_ = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids
lowerCamelCase_ = shift_tokens_right(UpperCAmelCase , model.config.pad_token_id , model.config.decoder_start_token_id )
lowerCamelCase_ = model(UpperCAmelCase , decoder_input_ids=UpperCAmelCase ).logits
lowerCamelCase_ = optax.softmax_cross_entropy(UpperCAmelCase , onehot(UpperCAmelCase , logits.shape[-1] ) ).mean()
lowerCamelCase_ = -(labels.shape[-1] * loss.item())
lowerCamelCase_ = -8_4.9_1_2_7
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 29 | 0 |
"""simple docstring"""
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
__UpperCAmelCase = logging.get_logger(__name__)
# General docstring
__UpperCAmelCase = "ResNetConfig"
# Base docstring
__UpperCAmelCase = "microsoft/resnet-50"
__UpperCAmelCase = [1, 2_048, 7, 7]
# Image classification docstring
__UpperCAmelCase = "microsoft/resnet-50"
__UpperCAmelCase = "tiger cat"
__UpperCAmelCase = [
"microsoft/resnet-50",
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class lowercase_ ( nn.Module ):
def __init__( self : List[str] , _lowercase : str , _lowercase : int , _lowercase : Dict = 3 , _lowercase : List[Any] = 1 , _lowercase : str = "relu" ):
super().__init__()
lowerCAmelCase__ : Optional[int] = nn.Convad(
_lowercase , _lowercase , kernel_size=_lowercase , stride=_lowercase , padding=kernel_size // 2 , bias=_lowercase )
lowerCAmelCase__ : Any = nn.BatchNormad(_lowercase )
lowerCAmelCase__ : Union[str, Any] = ACTaFN[activation] if activation is not None else nn.Identity()
def _lowerCAmelCase ( self : Optional[Any] , _lowercase : Any ):
lowerCAmelCase__ : Optional[int] = self.convolution(_lowercase )
lowerCAmelCase__ : Dict = self.normalization(_lowercase )
lowerCAmelCase__ : int = self.activation(_lowercase )
return hidden_state
class lowercase_ ( nn.Module ):
def __init__( self : Dict , _lowercase : Optional[Any] ):
super().__init__()
lowerCAmelCase__ : Tuple = ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
lowerCAmelCase__ : Optional[Any] = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 )
lowerCAmelCase__ : Union[str, Any] = config.num_channels
def _lowerCAmelCase ( self : Dict , _lowercase : Any ):
lowerCAmelCase__ : Optional[Any] = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
lowerCAmelCase__ : Any = self.embedder(_lowercase )
lowerCAmelCase__ : List[Any] = self.pooler(_lowercase )
return embedding
class lowercase_ ( nn.Module ):
def __init__( self : Any , _lowercase : Optional[int] , _lowercase : int , _lowercase : List[str] = 2 ):
super().__init__()
lowerCAmelCase__ : Optional[int] = nn.Convad(_lowercase , _lowercase , kernel_size=1 , stride=_lowercase , bias=_lowercase )
lowerCAmelCase__ : Optional[Any] = nn.BatchNormad(_lowercase )
def _lowerCAmelCase ( self : Dict , _lowercase : Tuple ):
lowerCAmelCase__ : List[str] = self.convolution(_lowercase )
lowerCAmelCase__ : List[str] = self.normalization(_lowercase )
return hidden_state
class lowercase_ ( nn.Module ):
def __init__( self : Any , _lowercase : Union[str, Any] , _lowercase : str , _lowercase : Dict = 1 , _lowercase : int = "relu" ):
super().__init__()
lowerCAmelCase__ : Optional[Any] = in_channels != out_channels or stride != 1
lowerCAmelCase__ : Tuple = (
ResNetShortCut(_lowercase , _lowercase , stride=_lowercase ) if should_apply_shortcut else nn.Identity()
)
lowerCAmelCase__ : Tuple = nn.Sequential(
ResNetConvLayer(_lowercase , _lowercase , stride=_lowercase ) , ResNetConvLayer(_lowercase , _lowercase , activation=_lowercase ) , )
lowerCAmelCase__ : Optional[int] = ACTaFN[activation]
def _lowerCAmelCase ( self : Tuple , _lowercase : List[str] ):
lowerCAmelCase__ : int = hidden_state
lowerCAmelCase__ : Union[str, Any] = self.layer(_lowercase )
lowerCAmelCase__ : Optional[Any] = self.shortcut(_lowercase )
hidden_state += residual
lowerCAmelCase__ : Tuple = self.activation(_lowercase )
return hidden_state
class lowercase_ ( nn.Module ):
def __init__( self : List[str] , _lowercase : Optional[Any] , _lowercase : str , _lowercase : List[Any] = 1 , _lowercase : Any = "relu" , _lowercase : List[Any] = 4 ):
super().__init__()
lowerCAmelCase__ : int = in_channels != out_channels or stride != 1
lowerCAmelCase__ : List[str] = out_channels // reduction
lowerCAmelCase__ : Dict = (
ResNetShortCut(_lowercase , _lowercase , stride=_lowercase ) if should_apply_shortcut else nn.Identity()
)
lowerCAmelCase__ : List[Any] = nn.Sequential(
ResNetConvLayer(_lowercase , _lowercase , kernel_size=1 ) , ResNetConvLayer(_lowercase , _lowercase , stride=_lowercase ) , ResNetConvLayer(_lowercase , _lowercase , kernel_size=1 , activation=_lowercase ) , )
lowerCAmelCase__ : Any = ACTaFN[activation]
def _lowerCAmelCase ( self : int , _lowercase : Optional[int] ):
lowerCAmelCase__ : Dict = hidden_state
lowerCAmelCase__ : str = self.layer(_lowercase )
lowerCAmelCase__ : str = self.shortcut(_lowercase )
hidden_state += residual
lowerCAmelCase__ : Union[str, Any] = self.activation(_lowercase )
return hidden_state
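# Illustrative note (example channel counts are assumptions, not taken from a
# specific config): with in_channels=256, out_channels=512 and the default
# reduction=4, the bottleneck above runs a 1x1 conv 256 -> 128, a (possibly
# strided) 3x3 conv 128 -> 128, and a final 1x1 conv 128 -> 512, while the
# shortcut projects 256 -> 512 so the residual addition shapes match.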
class lowercase_ ( nn.Module ):
def __init__( self : str , _lowercase : str , _lowercase : int , _lowercase : int , _lowercase : List[str] = 2 , _lowercase : List[Any] = 2 , ):
super().__init__()
lowerCAmelCase__ : Tuple = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
lowerCAmelCase__ : List[Any] = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(_lowercase , _lowercase , stride=_lowercase , activation=config.hidden_act ) , *[layer(_lowercase , _lowercase , activation=config.hidden_act ) for _ in range(depth - 1 )] , )
def _lowerCAmelCase ( self : Union[str, Any] , _lowercase : Tuple ):
lowerCAmelCase__ : int = input
for layer in self.layers:
lowerCAmelCase__ : str = layer(_lowercase )
return hidden_state
class lowercase_ ( nn.Module ):
def __init__( self : str , _lowercase : Union[str, Any] ):
super().__init__()
lowerCAmelCase__ : Union[str, Any] = nn.ModuleList([] )
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
_lowercase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
lowerCAmelCase__ : List[str] = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(_lowercase , config.depths[1:] ):
self.stages.append(ResNetStage(_lowercase , _lowercase , _lowercase , depth=_lowercase ) )
def _lowerCAmelCase ( self : int , _lowercase : Tuple , _lowercase : Union[str, Any] = False , _lowercase : int = True ):
lowerCAmelCase__ : Tuple = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowerCAmelCase__ : List[Any] = hidden_states + (hidden_state,)
lowerCAmelCase__ : List[Any] = stage_module(_lowercase )
if output_hidden_states:
lowerCAmelCase__ : int = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=_lowercase , hidden_states=_lowercase , )
class lowercase_ ( a_ ):
__magic_name__ : Tuple = ResNetConfig
__magic_name__ : Tuple = 'resnet'
__magic_name__ : List[Any] = 'pixel_values'
__magic_name__ : Union[str, Any] = True
def _lowerCAmelCase ( self : Union[str, Any] , _lowercase : Optional[Any] ):
if isinstance(_lowercase , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode="fan_out" , nonlinearity="relu" )
elif isinstance(_lowercase , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def _lowerCAmelCase ( self : Optional[Any] , _lowercase : Union[str, Any] , _lowercase : Tuple=False ):
if isinstance(_lowercase , _lowercase ):
lowerCAmelCase__ : Optional[Any] = value
__UpperCAmelCase = R"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
__UpperCAmelCase = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
"""The bare ResNet model outputting raw features without any specific head on top.""" , a_ , )
class lowercase_ ( a_ ):
def __init__( self : List[str] , _lowercase : List[str] ):
super().__init__(_lowercase )
lowerCAmelCase__ : Optional[int] = config
lowerCAmelCase__ : int = ResNetEmbeddings(_lowercase )
lowerCAmelCase__ : str = ResNetEncoder(_lowercase )
lowerCAmelCase__ : Dict = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_lowercase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_lowercase , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _lowerCAmelCase ( self : Tuple , _lowercase : int , _lowercase : Optional[int] = None , _lowercase : Dict = None ):
lowerCAmelCase__ : Any = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase__ : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase__ : int = self.embedder(_lowercase )
lowerCAmelCase__ : Any = self.encoder(
_lowercase , output_hidden_states=_lowercase , return_dict=_lowercase )
lowerCAmelCase__ : Dict = encoder_outputs[0]
lowerCAmelCase__ : Optional[Any] = self.pooler(_lowercase )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_lowercase , pooler_output=_lowercase , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
"""\n ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n """ , a_ , )
class lowercase_ ( a_ ):
def __init__( self : int , _lowercase : List[Any] ):
super().__init__(_lowercase )
lowerCAmelCase__ : List[str] = config.num_labels
lowerCAmelCase__ : Any = ResNetModel(_lowercase )
# classification head
lowerCAmelCase__ : List[str] = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_lowercase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_lowercase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _lowerCAmelCase ( self : str , _lowercase : Optional[Any] = None , _lowercase : Union[str, Any] = None , _lowercase : Dict = None , _lowercase : Optional[Any] = None , ):
lowerCAmelCase__ : List[str] = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase__ : Union[str, Any] = self.resnet(_lowercase , output_hidden_states=_lowercase , return_dict=_lowercase )
lowerCAmelCase__ : Optional[int] = outputs.pooler_output if return_dict else outputs[1]
lowerCAmelCase__ : Tuple = self.classifier(_lowercase )
lowerCAmelCase__ : Optional[int] = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
lowerCAmelCase__ : Tuple = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
lowerCAmelCase__ : List[str] = "single_label_classification"
else:
lowerCAmelCase__ : List[Any] = "multi_label_classification"
if self.config.problem_type == "regression":
lowerCAmelCase__ : Union[str, Any] = MSELoss()
if self.num_labels == 1:
lowerCAmelCase__ : Tuple = loss_fct(logits.squeeze() , labels.squeeze() )
else:
lowerCAmelCase__ : str = loss_fct(_lowercase , _lowercase )
elif self.config.problem_type == "single_label_classification":
lowerCAmelCase__ : List[str] = CrossEntropyLoss()
lowerCAmelCase__ : int = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
lowerCAmelCase__ : Any = BCEWithLogitsLoss()
lowerCAmelCase__ : Dict = loss_fct(_lowercase , _lowercase )
if not return_dict:
lowerCAmelCase__ : Optional[Any] = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=_lowercase , logits=_lowercase , hidden_states=outputs.hidden_states )
@add_start_docstrings(
"""\n ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n """ , a_ , )
class lowercase_ ( a_ , a_ ):
def __init__( self : str , _lowercase : int ):
super().__init__(_lowercase )
super()._init_backbone(_lowercase )
lowerCAmelCase__ : str = [config.embedding_size] + config.hidden_sizes
lowerCAmelCase__ : Any = ResNetEmbeddings(_lowercase )
lowerCAmelCase__ : Optional[Any] = ResNetEncoder(_lowercase )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_lowercase )
@replace_return_docstrings(output_type=_lowercase , config_class=_CONFIG_FOR_DOC )
def _lowerCAmelCase ( self : Union[str, Any] , _lowercase : Dict , _lowercase : Optional[int] = None , _lowercase : int = None ):
lowerCAmelCase__ : str = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase__ : Dict = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase__ : Union[str, Any] = self.embedder(_lowercase )
lowerCAmelCase__ : List[Any] = self.encoder(_lowercase , output_hidden_states=_lowercase , return_dict=_lowercase )
lowerCAmelCase__ : List[Any] = outputs.hidden_states
lowerCAmelCase__ : Optional[int] = ()
for idx, stage in enumerate(self.stage_names ):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
lowerCAmelCase__ : Optional[Any] = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=_lowercase , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=_lowercase , )
| 308 |
"""simple docstring"""
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = generate_pascal_triangle(lowerCAmelCase__ )
for row_idx in range(lowerCAmelCase__ ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=''' ''' )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] ,end=''' ''' )
else:
print(triangle[row_idx][col_idx] ,end='''''' )
print()
def lowercase ( lowerCAmelCase__ ):
if not isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
raise TypeError('''The input value of \'num_rows\' should be \'int\'''' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'''The input value of \'num_rows\' should be greater than or equal to 0''' )
lowerCamelCase_ = []
for current_row_idx in range(lowerCAmelCase__ ):
lowerCamelCase_ = populate_current_row(lowerCAmelCase__ ,lowerCAmelCase__ )
triangle.append(lowerCAmelCase__ )
return triangle
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
lowerCamelCase_ , lowerCamelCase_ = 1, 1
for current_col_idx in range(1 ,lowerCAmelCase__ ):
calculate_current_element(
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
return current_row
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,):
lowerCamelCase_ = triangle[current_row_idx - 1][current_col_idx - 1]
lowerCamelCase_ = triangle[current_row_idx - 1][current_col_idx]
lowerCamelCase_ = above_to_left_elt + above_to_right_elt
def lowercase ( lowerCAmelCase__ ):
if not isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
raise TypeError('''The input value of \'num_rows\' should be \'int\'''' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'''The input value of \'num_rows\' should be greater than or equal to 0''' )
lowerCamelCase_ = [[1]]
for row_index in range(1 ,lowerCAmelCase__ ):
lowerCamelCase_ = [0] + result[-1] + [0]
lowerCamelCase_ = row_index + 1
# Calculate the number of distinct elements in a row
lowerCamelCase_ = sum(divmod(lowerCAmelCase__ ,2 ) )
lowerCamelCase_ = [
temp_row[i - 1] + temp_row[i] for i in range(1 ,distinct_elements + 1 )
]
lowerCamelCase_ = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
lowerCamelCase_ = row_first_half + row_second_half
result.append(lowerCAmelCase__ )
return result
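# Illustrative check (not part of the original module): both generators above
# should agree; for example, with num_rows = 5 each returns
# [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1], [1, 4, 6, 4, 1]], the optimized
# version computing only the first half of each row and mirroring it.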
def lowercase ( ):
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(lowerCAmelCase__ ,lowerCAmelCase__ ) -> None:
lowerCamelCase_ = f"{func.__name__}({value})"
lowerCamelCase_ = timeit(f"__main__.{call}" ,setup='''import __main__''' )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(f"{call:38} -- {timing:.4f} seconds" )
for value in range(15 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(lowerCAmelCase__ ,lowerCAmelCase__ )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 29 | 0 |
def UpperCamelCase_( lowerCamelCase_ = 5000_0000 ) -> List[str]:
_lowercase : Union[str, Any] = set()
_lowercase : Optional[int] = int((limit - 24) ** (1 / 2) )
_lowercase : Optional[Any] = set(range(3 , prime_square_limit + 1 , 2 ) )
primes.add(2 )
for p in range(3 , prime_square_limit + 1 , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , prime_square_limit + 1 , lowerCAmelCase__ ) ) )
for primea in primes:
_lowercase : Optional[Any] = primea * primea
for primea in primes:
_lowercase : List[str] = primea * primea * primea
if square + cube >= limit - 16:
break
for primea in primes:
_lowercase : Optional[int] = primea * primea * primea * primea
_lowercase : Optional[Any] = square + cube + tetr
if total >= limit:
break
ret.add(lowerCAmelCase__ )
return len(lowerCAmelCase__ )
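# Illustrative check (numbers taken from the Project Euler 87 statement; the
# function name above is auto-generated): the smallest qualifying number is
# 28 = 2**2 + 2**3 + 2**4, and for limit = 50 exactly four numbers qualify
# (28, 33, 47 and 49), so the function should return 4 for that limit.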
if __name__ == "__main__":
print(F"{solution() = }")
| 89 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class __lowerCamelCase ( unittest.TestCase ):
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
lowerCamelCase_ = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
'''image_std''': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
lowerCamelCase_ = os.path.join(self.tmpdirname , UpperCAmelCase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
return BertTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
return BertTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def UpperCAmelCase__ ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCamelCase_ = [Image.fromarray(np.moveaxis(UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = self.get_rust_tokenizer()
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
processor_slow.save_pretrained(self.tmpdirname )
lowerCamelCase_ = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCAmelCase )
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
processor_fast.save_pretrained(self.tmpdirname )
lowerCamelCase_ = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , UpperCAmelCase )
self.assertIsInstance(processor_fast.tokenizer , UpperCAmelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , UpperCAmelCase )
self.assertIsInstance(processor_fast.image_processor , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase_ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
lowerCamelCase_ = self.get_image_processor(do_normalize=UpperCAmelCase , padding_value=1.0 )
lowerCamelCase_ = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = image_processor(UpperCAmelCase , return_tensors='''np''' )
lowerCamelCase_ = processor(images=UpperCAmelCase , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = '''lower newer'''
lowerCamelCase_ = processor(text=UpperCAmelCase )
lowerCamelCase_ = tokenizer(UpperCAmelCase , padding='''max_length''' , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = '''lower newer'''
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = processor(text=UpperCAmelCase , images=UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase ):
processor()
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase_ = processor.batch_decode(UpperCAmelCase )
lowerCamelCase_ = tokenizer.batch_decode(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = '''lower newer'''
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = processor(text=UpperCAmelCase , images=UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 29 | 0 |
"""simple docstring"""
import torch
from torch import nn
class a_ ( nn.Module ):
def __init__( self : Dict , snake_case__ : str , snake_case__ : List[Any] , snake_case__ : Any , snake_case__ : Optional[Any] , snake_case__ : int=1 , snake_case__ : str=False ):
super().__init__()
lowerCAmelCase__ = n_token
lowerCAmelCase__ = d_embed
lowerCAmelCase__ = d_proj
lowerCAmelCase__ = cutoffs + [n_token]
lowerCAmelCase__ = [0] + self.cutoffs
lowerCAmelCase__ = div_val
lowerCAmelCase__ = self.cutoffs[0]
lowerCAmelCase__ = len(self.cutoffs ) - 1
lowerCAmelCase__ = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
lowerCAmelCase__ = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
lowerCAmelCase__ = nn.Parameter(torch.zeros(self.n_clusters ) )
lowerCAmelCase__ = nn.ModuleList()
lowerCAmelCase__ = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(snake_case__ , snake_case__ ) ) )
else:
self.out_projs.append(snake_case__ )
self.out_layers.append(nn.Linear(snake_case__ , snake_case__ ) )
else:
for i in range(len(self.cutoffs ) ):
lowerCAmelCase__ , lowerCAmelCase__ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCAmelCase__ = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(snake_case__ , snake_case__ ) ) )
self.out_layers.append(nn.Linear(snake_case__ , r_idx - l_idx ) )
lowerCAmelCase__ = keep_order
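    # Illustrative note (values are the typical Transformer-XL / WikiText-103
    # settings, used here only as an example): with n_token=267735,
    # cutoffs=[20000, 40000, 200000] and div_val=4, the head predicts the 20000
    # most frequent tokens plus 3 cluster logits, while the three tail clusters
    # use progressively smaller projections of size d_embed // 4**i.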
def _SCREAMING_SNAKE_CASE ( self : int , snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : Optional[int] , snake_case__ : List[Any] ):
if proj is None:
lowerCAmelCase__ = nn.functional.linear(snake_case__ , snake_case__ , bias=snake_case__ )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
lowerCAmelCase__ = nn.functional.linear(snake_case__ , proj.t().contiguous() )
lowerCAmelCase__ = nn.functional.linear(snake_case__ , snake_case__ , bias=snake_case__ )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def _SCREAMING_SNAKE_CASE ( self : str , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any]=None , snake_case__ : Dict=False ):
if labels is not None:
# Shift so that tokens < n predict n
lowerCAmelCase__ = hidden[..., :-1, :].contiguous()
lowerCAmelCase__ = labels[..., 1:].contiguous()
lowerCAmelCase__ = hidden.view(-1 , hidden.size(-1 ) )
lowerCAmelCase__ = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError("""Input and labels should have the same size in the batch dimension.""" )
else:
lowerCAmelCase__ = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
lowerCAmelCase__ = self._compute_logit(snake_case__ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
lowerCAmelCase__ = labels != -100
lowerCAmelCase__ = torch.zeros_like(snake_case__ , dtype=hidden.dtype , device=hidden.device )
lowerCAmelCase__ = (
-nn.functional.log_softmax(snake_case__ , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
lowerCAmelCase__ = nn.functional.log_softmax(snake_case__ , dim=-1 )
else:
# construct weights and biases
lowerCAmelCase__ , lowerCAmelCase__ = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
lowerCAmelCase__ , lowerCAmelCase__ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCAmelCase__ = self.out_layers[0].weight[l_idx:r_idx]
lowerCAmelCase__ = self.out_layers[0].bias[l_idx:r_idx]
else:
lowerCAmelCase__ = self.out_layers[i].weight
lowerCAmelCase__ = self.out_layers[i].bias
if i == 0:
lowerCAmelCase__ = torch.cat([weight_i, self.cluster_weight] , dim=0 )
lowerCAmelCase__ = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(snake_case__ )
biases.append(snake_case__ )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = weights[0], biases[0], self.out_projs[0]
lowerCAmelCase__ = self._compute_logit(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
lowerCAmelCase__ = nn.functional.log_softmax(snake_case__ , dim=1 )
if labels is None:
lowerCAmelCase__ = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
lowerCAmelCase__ = torch.zeros_like(snake_case__ , dtype=hidden.dtype , device=hidden.device )
lowerCAmelCase__ = 0
lowerCAmelCase__ = [0] + self.cutoffs
for i in range(len(snake_case__ ) - 1 ):
lowerCAmelCase__ , lowerCAmelCase__ = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
lowerCAmelCase__ = (labels >= l_idx) & (labels < r_idx)
lowerCAmelCase__ = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
lowerCAmelCase__ = labels.index_select(0 , snake_case__ ) - l_idx
lowerCAmelCase__ = head_logprob.index_select(0 , snake_case__ )
lowerCAmelCase__ = hidden.index_select(0 , snake_case__ )
else:
lowerCAmelCase__ = hidden
if i == 0:
if labels is not None:
lowerCAmelCase__ = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
lowerCAmelCase__ = head_logprob[:, : self.cutoffs[0]]
else:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = weights[i], biases[i], self.out_projs[i]
lowerCAmelCase__ = self._compute_logit(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
lowerCAmelCase__ = nn.functional.log_softmax(snake_case__ , dim=1 )
lowerCAmelCase__ = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
lowerCAmelCase__ = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
lowerCAmelCase__ = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
lowerCAmelCase__ = logprob_i
if labels is not None:
if (hasattr(self , """keep_order""" ) and self.keep_order) or keep_order:
out.index_copy_(0 , snake_case__ , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def _SCREAMING_SNAKE_CASE ( self : str , snake_case__ : int ):
if self.n_clusters == 0:
lowerCAmelCase__ = self._compute_logit(snake_case__ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(snake_case__ , dim=-1 )
else:
# construct weights and biases
lowerCAmelCase__ , lowerCAmelCase__ = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
lowerCAmelCase__ , lowerCAmelCase__ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCAmelCase__ = self.out_layers[0].weight[l_idx:r_idx]
lowerCAmelCase__ = self.out_layers[0].bias[l_idx:r_idx]
else:
lowerCAmelCase__ = self.out_layers[i].weight
lowerCAmelCase__ = self.out_layers[i].bias
if i == 0:
lowerCAmelCase__ = torch.cat([weight_i, self.cluster_weight] , dim=0 )
lowerCAmelCase__ = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(snake_case__ )
biases.append(snake_case__ )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = weights[0], biases[0], self.out_projs[0]
lowerCAmelCase__ = self._compute_logit(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
lowerCAmelCase__ = hidden.new_empty((head_logit.size(0 ), self.n_token) )
lowerCAmelCase__ = nn.functional.log_softmax(snake_case__ , dim=1 )
lowerCAmelCase__ = [0] + self.cutoffs
for i in range(len(snake_case__ ) - 1 ):
lowerCAmelCase__ , lowerCAmelCase__ = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
lowerCAmelCase__ = head_logprob[:, : self.cutoffs[0]]
else:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = weights[i], biases[i], self.out_projs[i]
lowerCAmelCase__ = self._compute_logit(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
lowerCAmelCase__ = nn.functional.log_softmax(snake_case__ , dim=1 )
lowerCAmelCase__ = head_logprob[:, -i] + tail_logprob_i
lowerCAmelCase__ = logprob_i
return out
| 644 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2]
lowerCamelCase_ = True if '''large''' in model_name or '''huge''' in model_name else False
lowerCamelCase_ = True if '''large''' in model_name or '''huge''' in model_name else False
lowerCamelCase_ = True if '''large''' in model_name or '''huge''' in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
lowerCamelCase_ = [3, 3, 3, 3]
lowerCamelCase_ = [5, 5, 5, 5]
elif "fl4" in model_name:
lowerCamelCase_ = [4, 4, 4, 4]
lowerCamelCase_ = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
lowerCamelCase_ = [3, 3, 3, 3]
if "lrf" in model_name:
lowerCamelCase_ = [3, 3, 3, 3]
else:
lowerCamelCase_ = [2, 2, 2, 2]
if "tiny" in model_name:
lowerCamelCase_ = 96
elif "small" in model_name:
lowerCamelCase_ = 96
elif "base" in model_name:
lowerCamelCase_ = 128
elif "large" in model_name:
lowerCamelCase_ = 192
elif "xlarge" in model_name:
lowerCamelCase_ = 256
elif "huge" in model_name:
lowerCamelCase_ = 352
# set label information
lowerCamelCase_ = '''huggingface/label-files'''
if "large" in model_name or "huge" in model_name:
lowerCamelCase_ = '''imagenet-22k-id2label.json'''
else:
lowerCamelCase_ = '''imagenet-1k-id2label.json'''
lowerCamelCase_ = json.load(open(hf_hub_download(lowerCAmelCase__ ,lowerCAmelCase__ ,repo_type='''dataset''' ) ,'''r''' ) )
lowerCamelCase_ = {int(lowerCAmelCase__ ): v for k, v in idalabel.items()}
lowerCamelCase_ = {v: k for k, v in idalabel.items()}
lowerCamelCase_ = FocalNetConfig(
embed_dim=lowerCAmelCase__ ,depths=lowerCAmelCase__ ,focal_levels=lowerCAmelCase__ ,focal_windows=lowerCAmelCase__ ,use_conv_embed=lowerCAmelCase__ ,idalabel=lowerCAmelCase__ ,labelaid=lowerCAmelCase__ ,use_post_layernorm=lowerCAmelCase__ ,use_layerscale=lowerCAmelCase__ ,)
return config
def rename_key(name):
if "patch_embed.proj" in name:
lowerCamelCase_ = name.replace('''patch_embed.proj''' ,'''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
lowerCamelCase_ = name.replace('''patch_embed.norm''' ,'''embeddings.norm''' )
if "layers" in name:
lowerCamelCase_ = '''encoder.''' + name
if "encoder.layers" in name:
lowerCamelCase_ = name.replace('''encoder.layers''' ,'''encoder.stages''' )
if "downsample.proj" in name:
lowerCamelCase_ = name.replace('''downsample.proj''' ,'''downsample.projection''' )
if "blocks" in name:
lowerCamelCase_ = name.replace('''blocks''' ,'''layers''' )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
lowerCamelCase_ = name.replace('''modulation.f''' ,'''modulation.projection_in''' )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
lowerCamelCase_ = name.replace('''modulation.h''' ,'''modulation.projection_context''' )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
lowerCamelCase_ = name.replace('''modulation.proj''' ,'''modulation.projection_out''' )
if name == "norm.weight":
lowerCamelCase_ = '''layernorm.weight'''
if name == "norm.bias":
lowerCamelCase_ = '''layernorm.bias'''
if "head" in name:
lowerCamelCase_ = name.replace('''head''' ,'''classifier''' )
else:
lowerCamelCase_ = '''focalnet.''' + name
return name
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
# fmt: off
lowerCamelCase_ = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
lowerCamelCase_ = model_name_to_url[model_name]
print('''Checkpoint URL: ''' ,lowerCAmelCase__ )
lowerCamelCase_ = torch.hub.load_state_dict_from_url(lowerCAmelCase__ ,map_location='''cpu''' )['''model''']
# rename keys
for key in state_dict.copy().keys():
lowerCamelCase_ = state_dict.pop(lowerCAmelCase__ )
lowerCamelCase_ = val
lowerCamelCase_ = get_focalnet_config(lowerCAmelCase__ )
lowerCamelCase_ = FocalNetForImageClassification(lowerCAmelCase__ )
model.eval()
# load state dict
model.load_state_dict(lowerCAmelCase__ )
# verify conversion
lowerCamelCase_ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCamelCase_ = BitImageProcessor(
do_resize=lowerCAmelCase__ ,size={'''shortest_edge''': 256} ,resample=PILImageResampling.BILINEAR ,do_center_crop=lowerCAmelCase__ ,crop_size=224 ,do_normalize=lowerCAmelCase__ ,image_mean=lowerCAmelCase__ ,image_std=lowerCAmelCase__ ,)
lowerCamelCase_ = Image.open(requests.get(lowerCAmelCase__ ,stream=lowerCAmelCase__ ).raw )
lowerCamelCase_ = processor(images=lowerCAmelCase__ ,return_tensors='''pt''' )
lowerCamelCase_ = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406] ,std=[0.229, 0.224, 0.225] ),
] )
lowerCamelCase_ = image_transforms(lowerCAmelCase__ ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values ,lowerCAmelCase__ ,atol=1E-4 )
lowerCamelCase_ = model(**lowerCAmelCase__ )
lowerCamelCase_ = outputs.logits.argmax(-1 ).item()
print('''Predicted class:''' ,model.config.idalabel[predicted_class_idx] )
print('''First values of logits:''' ,outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
lowerCamelCase_ = torch.tensor([0.2_166, -0.4_368, 0.2_191] )
elif model_name == "focalnet-tiny-lrf":
lowerCamelCase_ = torch.tensor([1.1_669, 0.0_125, -0.1_695] )
elif model_name == "focalnet-small":
lowerCamelCase_ = torch.tensor([0.4_917, -0.0_430, 0.1_341] )
elif model_name == "focalnet-small-lrf":
lowerCamelCase_ = torch.tensor([-0.2_588, -0.5_342, -0.2_331] )
elif model_name == "focalnet-base":
lowerCamelCase_ = torch.tensor([-0.1_655, -0.4_090, -0.1_730] )
elif model_name == "focalnet-base-lrf":
lowerCamelCase_ = torch.tensor([0.5_306, -0.0_483, -0.3_928] )
assert torch.allclose(outputs.logits[0, :3] ,lowerCAmelCase__ ,atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(lowerCAmelCase__ )
processor.save_pretrained(lowerCAmelCase__ )
if push_to_hub:
print(f"Pushing model and processor of {model_name} to the hub..." )
model.push_to_hub(f"{model_name}" )
processor.push_to_hub(f"{model_name}" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""focalnet-tiny""",
type=str,
help="""Name of the FocalNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub.""",
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
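# Illustrative sketch, not part of the script above: the "rename keys" loop in the script
# loses its dictionary write-back, so the renamed key is never stored. The conventional
# pattern pops each old key and re-inserts the value under the new name. All _demo_* names
# below are hypothetical and only show the mechanics.
def _demo_rename_key(key: str) -> str:
    return key.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")

_demo_state_dict = {"patch_embed.proj.weight": 1, "head.bias": 2}
for key in _demo_state_dict.copy().keys():
    val = _demo_state_dict.pop(key)
    _demo_state_dict[_demo_rename_key(key)] = val
print(_demo_state_dict)  # {'embeddings.patch_embeddings.projection.weight': 1, 'head.bias': 2}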
| 29 | 0 |
"""simple docstring"""
def prime_sieve_eratosthenes(num: int) -> list[int]:
    """Return all primes up to and including ``num`` using the Sieve of Eratosthenes."""
    if num <= 0:
        raise ValueError("Input must be a positive integer")
    # primes[i] stays True while i is still a prime candidate.
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            # Mark every multiple of p, starting at p * p, as composite.
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1
    return [prime for prime in range(2, num + 1) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_num = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))
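    # Added illustrative check on a small input.
    print(prime_sieve_eratosthenes(20))  # [2, 3, 5, 7, 11, 13, 17, 19]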
| 453 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __lowerCamelCase ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
def UpperCAmelCase__ ( self ):
super().setUp()
lowerCamelCase_ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''你''', '''好''', '''是''', '''谁''', '''a''', '''b''', '''c''', '''d''']
lowerCamelCase_ = {}
lowerCamelCase_ = {}
for i, value in enumerate(UpperCAmelCase ):
lowerCamelCase_ = i
lowerCamelCase_ = i
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_shape_file'''] )
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_pronunciation_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
with open(self.word_shape_file , '''w''' , encoding='''utf-8''' ) as word_shape_writer:
json.dump(UpperCAmelCase , UpperCAmelCase , ensure_ascii=UpperCAmelCase )
with open(self.word_pronunciation_file , '''w''' , encoding='''utf-8''' ) as word_pronunciation_writer:
json.dump(UpperCAmelCase , UpperCAmelCase , ensure_ascii=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
lowerCamelCase_ = tokenizer.tokenize('''你好[SEP]你是谁''' )
self.assertListEqual(UpperCAmelCase , ['''你''', '''好''', '''[SEP]''', '''你''', '''是''', '''谁'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(UpperCAmelCase ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(UpperCAmelCase ) , [5, 6, 2, 5, 7, 8] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
lowerCamelCase_ = {}
for i, token in enumerate(UpperCAmelCase ):
lowerCamelCase_ = i
lowerCamelCase_ = RoCBertWordpieceTokenizer(vocab=UpperCAmelCase , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def UpperCAmelCase__ ( self ):
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def UpperCAmelCase__ ( self ):
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def UpperCAmelCase__ ( self ):
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(UpperCAmelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
if self.test_rust_tokenizer:
lowerCamelCase_ = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(UpperCAmelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
def UpperCAmelCase__ ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
lowerCamelCase_ = tokenizer_r.encode_plus(
UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , add_special_tokens=UpperCAmelCase , )
lowerCamelCase_ = tokenizer_r.do_lower_case if hasattr(UpperCAmelCase , '''do_lower_case''' ) else False
lowerCamelCase_ = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = ['''的''', '''人''', '''有''']
lowerCamelCase_ = ''''''.join(UpperCAmelCase )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCamelCase_ = True
lowerCamelCase_ = self.tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = tokenizer_p.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer_r.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer_r.convert_ids_to_tokens(UpperCAmelCase )
lowerCamelCase_ = tokenizer_p.convert_ids_to_tokens(UpperCAmelCase )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = False
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = self.tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = tokenizer_r.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer_p.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer_r.convert_ids_to_tokens(UpperCAmelCase )
lowerCamelCase_ = tokenizer_p.convert_ids_to_tokens(UpperCAmelCase )
# it is expected that only the first Chinese character is not preceded by "##".
lowerCamelCase_ = [
f"##{token}" if idx != 0 else token for idx, token in enumerate(UpperCAmelCase )
]
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
@slow
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
lowerCamelCase_ = tokenizer.encode('''你好''' , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer.encode('''你是谁''' , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase )
lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase , UpperCAmelCase )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_tokenizers(do_lower_case=UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
lowerCamelCase_ = '''你好,你是谁'''
lowerCamelCase_ = tokenizer.tokenize(UpperCAmelCase )
lowerCamelCase_ = tokenizer.convert_tokens_to_ids(UpperCAmelCase )
lowerCamelCase_ = tokenizer.convert_tokens_to_shape_ids(UpperCAmelCase )
lowerCamelCase_ = tokenizer.convert_tokens_to_pronunciation_ids(UpperCAmelCase )
lowerCamelCase_ = tokenizer.prepare_for_model(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer.encode_plus(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
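# Illustrative sketch, not part of the test file above: the WordPiece assertions earlier in
# this sample rely on greedy longest-match-first splitting with "##" continuation pieces.
# This is a simplified, self-contained version of that idea; the _demo_* names are made up.
def _demo_wordpiece(word, vocab, unk="[UNK]"):
    pieces, start = [], 0
    while start < len(word):
        end = len(word)
        cur = None
        while start < end:
            sub = word[start:end]
            if start > 0:
                sub = "##" + sub
            if sub in vocab:
                cur = sub
                break
            end -= 1
        if cur is None:
            return [unk]  # any unmatchable span maps the whole word to [UNK]
        pieces.append(cur)
        start = end
    return pieces

_demo_vocab = {"un", "##want", "##ed", "runn", "##ing"}
print(_demo_wordpiece("unwanted", _demo_vocab))   # ['un', '##want', '##ed']
print(_demo_wordpiece("unwantedX", _demo_vocab))  # ['[UNK]']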
| 29 | 0 |
import os
def solution() -> int:
    """Return the greatest product of four adjacent numbers in the same direction
    (right, down, or either diagonal) in the 20x20 grid read from grid.txt."""
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right
    for i in range(20):
        for j in range(17):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1 (down-right)
    for i in range(17):
        for j in range(17):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2 (down-left)
    for i in range(17):
        for j in range(3, 20):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp

    return maximum
if __name__ == "__main__":
    print(solution())
| 518 |
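# Illustrative sketch, not part of the sample above: the four hand-written scans in
# solution() can be expressed once with direction vectors. The toy grid, the window size,
# and the _demo_* name below are made up for illustration.
def _demo_max_adjacent_product(grid, k):
    n, m = len(grid), len(grid[0])
    best = 0
    for di, dj in ((0, 1), (1, 0), (1, 1), (1, -1)):  # right, down, two diagonals
        for i in range(n):
            for j in range(m):
                if 0 <= i + (k - 1) * di < n and 0 <= j + (k - 1) * dj < m:
                    prod = 1
                    for s in range(k):
                        prod *= grid[i + s * di][j + s * dj]
                    best = max(best, prod)
    return best

print(_demo_max_adjacent_product([[1, 2, 3], [4, 5, 6], [7, 8, 9]], k=2))  # 72 = 8 * 9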
"""simple docstring"""
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
A_ = """\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",
author = \"Moosavi, Nafise Sadat and
Strube, Michael\",
booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",
month = aug,
year = \"2016\",
address = \"Berlin, Germany\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/P16-1060\",
doi = \"10.18653/v1/P16-1060\",
pages = \"632--642\",
}
"""
A_ = """\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only work with CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identifies the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
"""
A_ = """
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting 'keep_singletons=False', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
'mentions': mentions
'muc': MUC metric [Vilain et al, 1995]
'bcub': B-cubed [Bagga and Baldwin, 1998]
'ceafe': CEAFe [Luo et al., 2005]
'lea': LEA [Moosavi and Strube, 2016]
'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric('coval')
>>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',
... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',
... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',
... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',
... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',
... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{'mentions/recall': 1.0,[...] 'conll_score': 100.0}
"""
def get_coref_infos(key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"):
lowerCamelCase_ = {doc: key_lines}
lowerCamelCase_ = {doc: sys_lines}
lowerCamelCase_ = {}
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ , lowerCamelCase_ = reader.get_doc_mentions(lowerCAmelCase__ ,key_doc_lines[doc] ,lowerCAmelCase__ )
key_singletons_num += singletons_num
if NP_only or min_span:
lowerCamelCase_ = reader.set_annotated_parse_trees(lowerCAmelCase__ ,key_doc_lines[doc] ,lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCamelCase_ , lowerCamelCase_ = reader.get_doc_mentions(lowerCAmelCase__ ,sys_doc_lines[doc] ,lowerCAmelCase__ )
sys_singletons_num += singletons_num
if NP_only or min_span:
lowerCamelCase_ = reader.set_annotated_parse_trees(lowerCAmelCase__ ,key_doc_lines[doc] ,lowerCAmelCase__ ,lowerCAmelCase__ )
if remove_nested:
lowerCamelCase_ , lowerCamelCase_ = reader.remove_nested_coref_mentions(lowerCAmelCase__ ,lowerCAmelCase__ )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
lowerCamelCase_ , lowerCamelCase_ = reader.remove_nested_coref_mentions(lowerCAmelCase__ ,lowerCAmelCase__ )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
lowerCamelCase_ = reader.get_mention_assignments(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCamelCase_ = reader.get_mention_assignments(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCamelCase_ = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
'''Number of removed nested coreferring mentions in the key '''
f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}" )
logger.info(
'''Number of resulting singleton clusters in the key '''
f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}" )
if not keep_singletons:
logger.info(
f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
'''files, respectively''' )
return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
lowerCamelCase_ = get_coref_infos(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCamelCase_ = {}
lowerCamelCase_ = 0
lowerCamelCase_ = 0
for name, metric in metrics:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = evaluator.evaluate_documents(lowerCAmelCase__ ,lowerCAmelCase__ ,beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": fa} )
logger.info(
name.ljust(10 ) ,f"Recall: {recall * 100:.2f}" ,f" Precision: {precision * 100:.2f}" ,f" F1: {fa * 100:.2f}" ,)
if conll_subparts_num == 3:
lowerCamelCase_ = (conll / 3) * 100
logger.info(f"CoNLL score: {conll:.2f}" )
output_scores.update({'''conll_score''': conll} )
return output_scores
def check_gold_parse_annotation(key_lines):
lowerCamelCase_ = False
for line in key_lines:
if not line.startswith('''#''' ):
if len(line.split() ) > 6:
lowerCamelCase_ = line.split()[5]
if not parse_col == "-":
lowerCamelCase_ = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase ( datasets.Metric ):
def UpperCAmelCase__ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' ) ),
'''references''': datasets.Sequence(datasets.Value('''string''' ) ),
} ) , codebase_urls=['''https://github.com/ns-moosavi/coval'''] , reference_urls=[
'''https://github.com/ns-moosavi/coval''',
'''https://www.aclweb.org/anthology/P16-1060''',
'''http://www.conll.cemantix.org/2012/data.html''',
] , )
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=False ):
lowerCamelCase_ = [
('''mentions''', evaluator.mentions),
('''muc''', evaluator.muc),
('''bcub''', evaluator.b_cubed),
('''ceafe''', evaluator.ceafe),
('''lea''', evaluator.lea),
]
if min_span:
lowerCamelCase_ = util.check_gold_parse_annotation(UpperCAmelCase )
if not has_gold_parse:
raise NotImplementedError('''References should have gold parse annotation to use \'min_span\'.''' )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
lowerCamelCase_ = evaluate(
key_lines=UpperCAmelCase , sys_lines=UpperCAmelCase , metrics=UpperCAmelCase , NP_only=UpperCAmelCase , remove_nested=UpperCAmelCase , keep_singletons=UpperCAmelCase , min_span=UpperCAmelCase , )
return score
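# Illustrative arithmetic, not part of the metric above: the CoNLL score it reports is the
# mean of the MUC, B-cubed and CEAFe F1 values scaled to 0-100. The F1 values here are made up.
muc_f1, bcub_f1, ceafe_f1 = 0.75, 0.50, 0.25
conll_score = (muc_f1 + bcub_f1 + ceafe_f1) / 3 * 100
print(conll_score)  # 50.0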
| 29 | 0 |
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester :
'''simple docstring'''
def __init__( self : Optional[int] , _UpperCamelCase : Dict , ):
_lowercase: Any = parent
_lowercase: Tuple = 13
_lowercase: int = 7
_lowercase: Any = True
_lowercase: List[str] = True
_lowercase: Optional[int] = True
_lowercase: List[Any] = 99
_lowercase: Any = 32
_lowercase: Optional[Any] = 2
_lowercase: List[Any] = 4
_lowercase: Dict = 37
_lowercase: List[Any] = "gelu"
_lowercase: Any = 0.1
_lowercase: Dict = 0.1
_lowercase: Optional[int] = 512
_lowercase: Any = 16
_lowercase: Tuple = 2
_lowercase: Optional[Any] = 0.0_2
_lowercase: Any = 3
_lowercase: Dict = 4
_lowercase: str = None
def UpperCAmelCase__ ( self : List[str]):
_lowercase: Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_lowercase: Any = None
if self.use_input_mask:
_lowercase: List[Any] = random_attention_mask([self.batch_size, self.seq_length])
_lowercase: Union[str, Any] = None
_lowercase: Union[str, Any] = None
_lowercase: List[Any] = None
if self.use_labels:
_lowercase: int = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_lowercase: str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
_lowercase: List[Any] = ids_tensor([self.batch_size] , self.num_choices)
_lowercase: Union[str, Any] = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase__ ( self : List[Any]):
(
(
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) ,
): Optional[Any] = self.prepare_config_and_inputs()
_lowercase: List[Any] = True
_lowercase: str = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
_lowercase: str = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCAmelCase__ ( self : Union[str, Any] , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Any , _UpperCamelCase : Dict , _UpperCamelCase : Optional[int] , _UpperCamelCase : Union[str, Any]):
_lowercase: Tuple = TFEsmModel(config=_UpperCamelCase)
_lowercase: str = {"input_ids": input_ids, "attention_mask": input_mask}
_lowercase: Optional[int] = model(_UpperCamelCase)
_lowercase: Optional[int] = [input_ids, input_mask]
_lowercase: int = model(_UpperCamelCase)
_lowercase: Tuple = model(_UpperCamelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCAmelCase__ ( self : Any , _UpperCamelCase : Optional[int] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : str , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : int , _UpperCamelCase : Tuple , _UpperCamelCase : Any , ):
_lowercase: Dict = True
_lowercase: Optional[Any] = TFEsmModel(config=_UpperCamelCase)
_lowercase: Any = {
"input_ids": input_ids,
"attention_mask": input_mask,
"encoder_hidden_states": encoder_hidden_states,
"encoder_attention_mask": encoder_attention_mask,
}
_lowercase: Optional[int] = model(_UpperCamelCase)
_lowercase: Optional[int] = [input_ids, input_mask]
_lowercase: List[Any] = model(_UpperCamelCase , encoder_hidden_states=_UpperCamelCase)
# Also check the case where encoder outputs are not passed
_lowercase: Dict = model(_UpperCamelCase , attention_mask=_UpperCamelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCAmelCase__ ( self : List[str] , _UpperCamelCase : str , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Any , _UpperCamelCase : int , _UpperCamelCase : Tuple , _UpperCamelCase : int):
_lowercase: int = TFEsmForMaskedLM(config=_UpperCamelCase)
_lowercase: List[str] = model([input_ids, input_mask])
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def UpperCAmelCase__ ( self : str , _UpperCamelCase : List[Any] , _UpperCamelCase : str , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Dict , _UpperCamelCase : Any , _UpperCamelCase : List[str]):
_lowercase: Union[str, Any] = self.num_labels
_lowercase: int = TFEsmForTokenClassification(config=_UpperCamelCase)
_lowercase: Tuple = {"input_ids": input_ids, "attention_mask": input_mask}
_lowercase: Union[str, Any] = model(_UpperCamelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def UpperCAmelCase__ ( self : Tuple):
_lowercase: Tuple = self.prepare_config_and_inputs()
(
(
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) ,
): List[Any] = config_and_inputs
_lowercase: str = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class TFEsmModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase : Optional[Any] = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
lowerCamelCase : Tuple = (
{
'feature-extraction': TFEsmModel,
'fill-mask': TFEsmForMaskedLM,
'text-classification': TFEsmForSequenceClassification,
'token-classification': TFEsmForTokenClassification,
'zero-shot': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCamelCase : str = False
lowerCamelCase : Tuple = False
def UpperCAmelCase__ ( self : Tuple):
_lowercase: List[str] = TFEsmModelTester(self)
_lowercase: str = ConfigTester(self , config_class=_UpperCamelCase , hidden_size=37)
def UpperCAmelCase__ ( self : int):
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : Tuple):
_lowercase: int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase)
def UpperCAmelCase__ ( self : Any):
_lowercase: List[str] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*_UpperCamelCase)
def UpperCAmelCase__ ( self : Union[str, Any]):
_lowercase: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCamelCase)
def UpperCAmelCase__ ( self : Tuple):
_lowercase: Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCamelCase)
@slow
def UpperCAmelCase__ ( self : Optional[int]):
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase: Tuple = TFEsmModel.from_pretrained(_UpperCamelCase)
self.assertIsNotNone(_UpperCamelCase)
@unittest.skip("Protein models do not support embedding resizing.")
def UpperCAmelCase__ ( self : List[str]):
pass
@unittest.skip("Protein models do not support embedding resizing.")
def UpperCAmelCase__ ( self : Tuple):
pass
def UpperCAmelCase__ ( self : Optional[Any]):
_lowercase , _lowercase: Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase: List[str] = model_class(_UpperCamelCase)
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer)
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
_lowercase: List[str] = model.get_bias()
assert isinstance(_UpperCamelCase , _UpperCamelCase)
for k, v in name.items():
assert isinstance(_UpperCamelCase , tf.Variable)
else:
_lowercase: List[str] = model.get_output_embeddings()
assert x is None
_lowercase: int = model.get_bias()
assert name is None
@require_tf
class A ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCAmelCase__ ( self : Any):
_lowercase: Any = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
_lowercase: List[str] = tf.constant([[0, 1, 2, 3, 4, 5]])
_lowercase: Optional[Any] = model(_UpperCamelCase)[0]
_lowercase: Union[str, Any] = [1, 6, 33]
self.assertEqual(list(output.numpy().shape) , _UpperCamelCase)
# compare the actual values for a slice.
_lowercase: Optional[int] = tf.constant(
[
[
[8.9_2_1_5_1_8, -10.589_814, -6.4_6_7_1_3_0_7],
[-6.3_9_6_7_1_5_6, -13.911_377, -1.1_2_1_1_9_1_5],
[-7.7_8_1_2_4_7, -13.951_557, -3.7_4_0_5_9_2],
]
])
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-2))
@slow
def UpperCAmelCase__ ( self : Optional[Any]):
_lowercase: List[str] = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
_lowercase: Tuple = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
_lowercase: Union[str, Any] = model(_UpperCamelCase)[0]
# compare the actual values for a slice.
_lowercase: Any = tf.constant(
[
[
[0.1_4_4_4_3_0_9_2, 0.5_4_1_2_5_3_2_7, 0.3_2_4_7_7_3_9],
[0.3_0_3_4_0_4_8_4, 0.0_0_5_2_6_6_7_6, 0.3_1_0_7_7_7_2_2],
[0.3_2_2_7_8_0_4_3, -0.2_4_9_8_7_0_9_6, 0.3_4_1_4_6_2_8],
]
])
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4))
| 226 |
"""simple docstring"""
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class __lowerCamelCase ( unittest.TestCase ):
@parameterized.expand([(None,), ('''foo.json''',)] )
def UpperCAmelCase__ ( self , UpperCAmelCase ):
lowerCamelCase_ = GenerationConfig(
do_sample=UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCAmelCase , config_name=UpperCAmelCase )
lowerCamelCase_ = GenerationConfig.from_pretrained(UpperCAmelCase , config_name=UpperCAmelCase )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , UpperCAmelCase )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = AutoConfig.from_pretrained('''gpt2''' )
lowerCamelCase_ = GenerationConfig.from_model_config(UpperCAmelCase )
lowerCamelCase_ = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(UpperCAmelCase , UpperCAmelCase )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig()
lowerCamelCase_ = {
'''max_new_tokens''': 1024,
'''foo''': '''bar''',
}
lowerCamelCase_ = copy.deepcopy(UpperCAmelCase )
lowerCamelCase_ = generation_config.update(**UpperCAmelCase )
# update_kwargs was not modified (no side effects)
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(UpperCAmelCase , {'''foo''': '''bar'''} )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig()
lowerCamelCase_ = '''bar'''
with tempfile.TemporaryDirectory('''test-generation-config''' ) as tmp_dir:
generation_config.save_pretrained(UpperCAmelCase )
lowerCamelCase_ = GenerationConfig.from_pretrained(UpperCAmelCase )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , '''bar''' )
lowerCamelCase_ = GenerationConfig.from_model_config(UpperCAmelCase )
assert not hasattr(UpperCAmelCase , '''foo''' ) # no new kwargs should be initialized if from config
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , UpperCAmelCase )
self.assertEqual(default_config.num_beams , 1 )
lowerCamelCase_ = GenerationConfig(
do_sample=UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , UpperCAmelCase )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCAmelCase )
lowerCamelCase_ = GenerationConfig.from_pretrained(UpperCAmelCase , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , UpperCAmelCase )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class __lowerCamelCase ( unittest.TestCase ):
@classmethod
def UpperCAmelCase__ ( cls ):
lowerCamelCase_ = TOKEN
HfFolder.save_token(UpperCAmelCase )
@classmethod
def UpperCAmelCase__ ( cls ):
try:
delete_repo(token=cls._token , repo_id='''test-generation-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-generation-config-org''' )
except HTTPError:
pass
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig(
do_sample=UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''test-generation-config''' , use_auth_token=self._token )
lowerCamelCase_ = GenerationConfig.from_pretrained(f"{USER}/test-generation-config" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-generation-config''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
UpperCAmelCase , repo_id='''test-generation-config''' , push_to_hub=UpperCAmelCase , use_auth_token=self._token )
lowerCamelCase_ = GenerationConfig.from_pretrained(f"{USER}/test-generation-config" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig(
do_sample=UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''valid_org/test-generation-config-org''' , use_auth_token=self._token )
lowerCamelCase_ = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-generation-config-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
UpperCAmelCase , repo_id='''valid_org/test-generation-config-org''' , push_to_hub=UpperCAmelCase , use_auth_token=self._token )
lowerCamelCase_ = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
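# Minimal save/load round-trip sketch, not part of the test file above; it mirrors what the
# tests assert about explicitly-set sampling parameters on GenerationConfig.
import tempfile
from transformers import GenerationConfig

cfg = GenerationConfig(do_sample=True, temperature=0.7)
with tempfile.TemporaryDirectory() as tmp_dir:
    cfg.save_pretrained(tmp_dir)
    loaded = GenerationConfig.from_pretrained(tmp_dir)
assert loaded.do_sample is True and loaded.temperature == 0.7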
| 29 | 0 |
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester :
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : str=2 , SCREAMING_SNAKE_CASE_ : Optional[int]=3 , SCREAMING_SNAKE_CASE_ : List[str]=4 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE_ : Any=7 , SCREAMING_SNAKE_CASE_ : Any=True , SCREAMING_SNAKE_CASE_ : Tuple=True , SCREAMING_SNAKE_CASE_ : List[str]=True , SCREAMING_SNAKE_CASE_ : Dict=True , SCREAMING_SNAKE_CASE_ : Union[str, Any]=99 , SCREAMING_SNAKE_CASE_ : Optional[int]=36 , SCREAMING_SNAKE_CASE_ : Tuple=3 , SCREAMING_SNAKE_CASE_ : int=4 , SCREAMING_SNAKE_CASE_ : str=37 , SCREAMING_SNAKE_CASE_ : List[Any]="gelu" , SCREAMING_SNAKE_CASE_ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE_ : Any=0.1 , SCREAMING_SNAKE_CASE_ : Optional[int]=512 , SCREAMING_SNAKE_CASE_ : Optional[int]=16 , SCREAMING_SNAKE_CASE_ : Any=2 , SCREAMING_SNAKE_CASE_ : List[Any]=0.02 , SCREAMING_SNAKE_CASE_ : Tuple=6 , SCREAMING_SNAKE_CASE_ : int=6 , SCREAMING_SNAKE_CASE_ : List[Any]=3 , SCREAMING_SNAKE_CASE_ : str=4 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=None , SCREAMING_SNAKE_CASE_ : Optional[Any]=1_000 , ):
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = num_channels
lowerCAmelCase__ = image_size
lowerCAmelCase__ = patch_size
lowerCAmelCase__ = text_seq_length
lowerCAmelCase__ = is_training
lowerCAmelCase__ = use_input_mask
lowerCAmelCase__ = use_token_type_ids
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = hidden_dropout_prob
lowerCAmelCase__ = attention_probs_dropout_prob
lowerCAmelCase__ = max_position_embeddings
lowerCAmelCase__ = type_vocab_size
lowerCAmelCase__ = type_sequence_label_size
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = coordinate_size
lowerCAmelCase__ = shape_size
lowerCAmelCase__ = num_labels
lowerCAmelCase__ = num_choices
lowerCAmelCase__ = scope
lowerCAmelCase__ = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
lowerCAmelCase__ = text_seq_length
lowerCAmelCase__ = (image_size // patch_size) ** 2 + 1
lowerCAmelCase__ = self.text_seq_length + self.image_seq_length
def __snake_case ( self : List[Any] ):
lowerCAmelCase__ = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
lowerCAmelCase__ = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
lowerCAmelCase__ = bbox[i, j, 3]
lowerCAmelCase__ = bbox[i, j, 1]
lowerCAmelCase__ = t
if bbox[i, j, 2] < bbox[i, j, 0]:
lowerCAmelCase__ = bbox[i, j, 2]
lowerCAmelCase__ = bbox[i, j, 0]
lowerCAmelCase__ = t
lowerCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase__ = None
if self.use_input_mask:
lowerCAmelCase__ = random_attention_mask([self.batch_size, self.text_seq_length] )
lowerCAmelCase__ = None
if self.use_token_type_ids:
lowerCAmelCase__ = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
lowerCAmelCase__ = None
lowerCAmelCase__ = None
if self.use_labels:
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
lowerCAmelCase__ = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def __snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
lowerCAmelCase__ = LayoutLMvaModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
# text + image
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ , pixel_values=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = model(
SCREAMING_SNAKE_CASE_ , bbox=SCREAMING_SNAKE_CASE_ , pixel_values=SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ , bbox=SCREAMING_SNAKE_CASE_ , pixel_values=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ , bbox=SCREAMING_SNAKE_CASE_ , pixel_values=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
lowerCAmelCase__ = model(pixel_values=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def __snake_case ( self : int , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : int ):
lowerCAmelCase__ = self.num_labels
lowerCAmelCase__ = LayoutLMvaForSequenceClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCAmelCase__ = model(
SCREAMING_SNAKE_CASE_ , bbox=SCREAMING_SNAKE_CASE_ , pixel_values=SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[str] ):
lowerCAmelCase__ = self.num_labels
lowerCAmelCase__ = LayoutLMvaForTokenClassification(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCAmelCase__ = model(
SCREAMING_SNAKE_CASE_ , bbox=SCREAMING_SNAKE_CASE_ , pixel_values=SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def __snake_case ( self : Dict , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[int] ):
lowerCAmelCase__ = LayoutLMvaForQuestionAnswering(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCAmelCase__ = model(
SCREAMING_SNAKE_CASE_ , bbox=SCREAMING_SNAKE_CASE_ , pixel_values=SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , start_positions=SCREAMING_SNAKE_CASE_ , end_positions=SCREAMING_SNAKE_CASE_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __snake_case ( self : List[Any] ):
lowerCAmelCase__ = self.prepare_config_and_inputs()
(
    config,
    input_ids,
    bbox,
    pixel_values,
    token_type_ids,
    input_mask,
    sequence_labels,
    token_labels,
) = config_and_inputs
lowerCAmelCase__ = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''pixel_values''': pixel_values,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( snake_case__ , snake_case__ , unittest.TestCase ):
UpperCamelCase_ :Optional[int] = False
UpperCamelCase_ :List[Any] = False
UpperCamelCase_ :Tuple = False
UpperCamelCase_ :str = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
UpperCamelCase_ :Dict = (
{'document-question-answering': LayoutLMvaForQuestionAnswering, 'feature-extraction': LayoutLMvaModel}
if is_torch_available()
else {}
)
def __snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[str] ):
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
def __snake_case ( self : Optional[int] ):
lowerCAmelCase__ = LayoutLMvaModelTester(self )
lowerCAmelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , hidden_size=37 )
def __snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Union[str, Any]=False ):
lowerCAmelCase__ = copy.deepcopy(SCREAMING_SNAKE_CASE_ )
if model_class in get_values(SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase__ = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(SCREAMING_SNAKE_CASE_ , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase__ = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ )
elif model_class in get_values(SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ )
elif model_class in [
*get_values(SCREAMING_SNAKE_CASE_ ),
]:
lowerCAmelCase__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ )
elif model_class in [
*get_values(SCREAMING_SNAKE_CASE_ ),
]:
lowerCAmelCase__ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ , )
return inputs_dict
def __snake_case ( self : int ):
self.config_tester.run_common_tests()
def __snake_case ( self : str ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : str ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCAmelCase__ = type
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : Optional[int] ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : List[Any] ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : List[str] ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*SCREAMING_SNAKE_CASE_ )
@slow
def __snake_case ( self : Optional[int] ):
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ = LayoutLMvaModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase_ () -> Tuple:
'''simple docstring'''
lowerCAmelCase__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
@cached_property
def __snake_case ( self : List[str] ):
return LayoutLMvaImageProcessor(apply_ocr=SCREAMING_SNAKE_CASE_ ) if is_vision_available() else None
@slow
def __snake_case ( self : str ):
lowerCAmelCase__ = LayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' ).to(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = self.default_image_processor
lowerCAmelCase__ = prepare_img()
lowerCAmelCase__ = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).pixel_values.to(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = torch.tensor([[1, 2]] )
lowerCAmelCase__ = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
lowerCAmelCase__ = model(
input_ids=input_ids.to(SCREAMING_SNAKE_CASE_ ) , bbox=bbox.to(SCREAMING_SNAKE_CASE_ ) , pixel_values=pixel_values.to(SCREAMING_SNAKE_CASE_ ) , )
# verify the logits
lowerCAmelCase__ = torch.Size((1, 199, 768) )
self.assertEqual(outputs.last_hidden_state.shape , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = torch.tensor(
[[-0.0_529, 0.3_618, 0.1_632], [-0.1_587, -0.1_667, -0.0_400], [-0.1_557, -0.1_671, -0.0_505]] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
| 668 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __lowerCamelCase :
a__: List[str]
a__: Optional[str] = None
# Automatically constructed
a__: ClassVar[str] = "dict"
a__: ClassVar[Any] = None
a__: str = field(default='Translation' , init=lowerCAmelCase , repr=lowerCAmelCase )
def __call__( self ):
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def UpperCAmelCase__ ( self ):
from .features import Value
return {k: Value('''string''' ) for k in sorted(self.languages )}
@dataclass
class __lowerCamelCase :
a__: Optional[List] = None
a__: Optional[int] = None
a__: Optional[str] = None
# Automatically constructed
a__: ClassVar[str] = "dict"
a__: ClassVar[Any] = None
a__: str = field(default='TranslationVariableLanguages' , init=lowerCAmelCase , repr=lowerCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = sorted(set(self.languages ) ) if self.languages else None
lowerCamelCase_ = len(self.languages ) if self.languages else None
def __call__( self ):
return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} )
def encode_example( self , translation_dict ):
    lang_set = set(self.languages )
    if self.languages and set(translation_dict ) - lang_set:
        raise ValueError(
            f"Some languages in example ({', '.join(sorted(set(translation_dict ) - lang_set ) )}) are not in valid set ({', '.join(lang_set )})." )
    # Convert dictionary into tuples, splitting out cases where there are
    # multiple translations for a single language.
    translation_tuples = []
    for lang, text in translation_dict.items():
        if isinstance(text , str ):
            translation_tuples.append((lang, text) )
        else:
            translation_tuples.extend([(lang, el) for el in text] )
    # Ensure translations are in ascending order by language code.
    languages, translations = zip(*sorted(translation_tuples ) )
    return {"language": languages, "translation": translations}
def UpperCAmelCase__ ( self ):
from .features import Sequence, Value
return {
"language": Sequence(Value('''string''' ) ),
"translation": Sequence(Value('''string''' ) ),
}
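# A small usage sketch of the feature above (illustrative only; the method carries the
# name `encode_example` in the upstream `datasets` library, and the values below are made up):
#
#     feature = TranslationVariableLanguages(languages=["en", "fr"])
#     feature.encode_example({"en": "the cat", "fr": "le chat"})
#     # -> {"language": ("en", "fr"), "translation": ("the cat", "le chat")}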
| 29 | 0 |
'''simple docstring'''
from math import ceil
def assert_device_map( device_map, num_blocks ) -> None:
    '''Check that a device_map assigns every attention block to exactly one device.'''
    blocks = list(range(0, num_blocks ) )
    device_map_blocks = [item for sublist in list(device_map.values() ) for item in sublist]
    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i ) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i )
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]
    if len(duplicate_blocks ) != 0:
        raise ValueError(
            '''Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device.'''
            ''' These attention blocks were specified more than once: ''' + str(duplicate_blocks ) )
    if len(missing_blocks ) != 0:
        raise ValueError(
            '''There are attention blocks for this model that are not specified in the device_map. Add these attention '''
            '''blocks to a device on the device_map: ''' + str(missing_blocks ) )
    if len(extra_blocks ) != 0:
        raise ValueError(
            '''The device_map contains more attention blocks than this model has. Remove these from the device_map:'''
            + str(extra_blocks ) )
def get_device_map( n_layers, devices ) -> dict:
    '''Split `n_layers` layer indices as evenly as possible across `devices`.'''
    layers = list(range(n_layers ) )
    n_blocks = int(ceil(n_layers / len(devices ) ) )
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks )]
    return dict(zip(devices, layers_list ) )
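# Rough sketch of what the two helpers above produce/check (illustrative values, not from any model):
#
#     device_map = get_device_map(n_layers=6, devices=[0, 1])
#     # -> {0: [0, 1, 2], 1: [3, 4, 5]}   (layer indices split as evenly as possible)
#     assert_device_map(device_map, num_blocks=6)   # passes: no duplicate, missing, or extra blocks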
| 640 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
A_ = {
"""configuration_gpt_neo""": ["""GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoConfig""", """GPTNeoOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"""GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoForCausalLM""",
"""GPTNeoForQuestionAnswering""",
"""GPTNeoForSequenceClassification""",
"""GPTNeoForTokenClassification""",
"""GPTNeoModel""",
"""GPTNeoPreTrainedModel""",
"""load_tf_weights_in_gpt_neo""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"""FlaxGPTNeoForCausalLM""",
"""FlaxGPTNeoModel""",
"""FlaxGPTNeoPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
A_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 29 | 0 |
'''simple docstring'''
import math
def prime_sieve(n: int) -> list:
    '''Sieve of Eratosthenes: return all primes strictly below n.'''
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3 , int(n**0.5 + 1 ) , 2 ):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3 , n , 2 ):
        if is_prime[i]:
            primes.append(i )
    return primes
def solution( limit = 9999_6666_3333 ) -> int:
'''simple docstring'''
_A = math.floor(math.sqrt(lowerCAmelCase__ ) ) + 100
_A = prime_sieve(lowerCAmelCase__ )
_A = 0
_A = 0
_A = primes[prime_index]
while (last_prime**2) <= limit:
_A = primes[prime_index + 1]
_A = last_prime**2
_A = next_prime**2
# Get numbers divisible by lps(current)
_A = lower_bound + last_prime
while upper_bound > current <= limit:
matches_sum += current
current += last_prime
# Reset the upper_bound
while (upper_bound - next_prime) > limit:
upper_bound -= next_prime
# Add the numbers divisible by ups(current)
_A = upper_bound - next_prime
while current > lower_bound:
matches_sum += current
current -= next_prime
# Remove the numbers divisible by both ups and lps
_A = 0
while upper_bound > current <= limit:
if current <= lower_bound:
# Increment the current number
current += last_prime * next_prime
continue
if current > limit:
break
# Remove twice since it was added by both ups and lps
matches_sum -= current * 2
# Increment the current number
current += last_prime * next_prime
# Setup for next pair
_A = next_prime
prime_index += 1
return matches_sum
if __name__ == "__main__":
print(solution())
| 330 |
"""simple docstring"""
import math
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = [True] * n
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = True
for i in range(3 ,int(n**0.5 + 1 ) ,2 ):
lowerCamelCase_ = i * 2
while index < n:
lowerCamelCase_ = False
lowerCamelCase_ = index + i
lowerCamelCase_ = [2]
for i in range(3 ,lowerCAmelCase__ ,2 ):
if is_prime[i]:
primes.append(lowerCAmelCase__ )
return primes
def lowercase ( lowerCAmelCase__ = 999_966_663_333 ):
lowerCamelCase_ = math.floor(math.sqrt(lowerCAmelCase__ ) ) + 100
lowerCamelCase_ = prime_sieve(lowerCAmelCase__ )
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = primes[prime_index]
while (last_prime**2) <= limit:
lowerCamelCase_ = primes[prime_index + 1]
lowerCamelCase_ = last_prime**2
lowerCamelCase_ = next_prime**2
# Get numbers divisible by lps(current)
lowerCamelCase_ = lower_bound + last_prime
while upper_bound > current <= limit:
matches_sum += current
current += last_prime
# Reset the upper_bound
while (upper_bound - next_prime) > limit:
upper_bound -= next_prime
# Add the numbers divisible by ups(current)
lowerCamelCase_ = upper_bound - next_prime
while current > lower_bound:
matches_sum += current
current -= next_prime
# Remove the numbers divisible by both ups and lps
lowerCamelCase_ = 0
while upper_bound > current <= limit:
if current <= lower_bound:
# Increment the current number
current += last_prime * next_prime
continue
if current > limit:
break
# Remove twice since it was added by both ups and lps
matches_sum -= current * 2
# Increment the current number
current += last_prime * next_prime
# Setup for next pair
lowerCamelCase_ = next_prime
prime_index += 1
return matches_sum
if __name__ == "__main__":
print(solution())
| 29 | 0 |
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows )
    for row_idx in range(num_rows ):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1 ):
            print(end=""" """ )
        # Print row values
        for col_idx in range(row_idx + 1 ):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx] , end=""" """ )
            else:
                print(triangle[row_idx][col_idx] , end="""""" )
        print()
def generate_pascal_triangle(num_rows: int) -> list:
    """Build Pascal's triangle row by row."""
    if not isinstance(num_rows , int ):
        raise TypeError("""The input value of 'num_rows' should be 'int'""" )
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            """The input value of 'num_rows' should be greater than or equal to 0""" )
    triangle = []
    for current_row_idx in range(num_rows ):
        current_row = populate_current_row(triangle , current_row_idx )
        triangle.append(current_row )
    return triangle
def populate_current_row(triangle: list , current_row_idx: int ) -> list:
    """Create one row of the triangle from the row above it."""
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1 , current_row_idx ):
        calculate_current_element(
            triangle , current_row , current_row_idx , current_col_idx )
    return current_row
def calculate_current_element(triangle: list , current_row: list , current_row_idx: int , current_col_idx: int , ) -> None:
    """Each interior element is the sum of the two elements directly above it."""
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def generate_pascal_triangle_optimized(num_rows: int) -> list:
    """Build Pascal's triangle using the left/right symmetry of each row."""
    if not isinstance(num_rows , int ):
        raise TypeError("""The input value of 'num_rows' should be 'int'""" )
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            """The input value of 'num_rows' should be greater than or equal to 0""" )
    result = [[1]]
    for row_index in range(1 , num_rows ):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length , 2 ) )
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row_to_append = row_first_half + row_second_half
        result.append(row_to_append )
    return result
def benchmark() -> None:
    """Time both triangle generators over a range of input sizes."""
    from collections.abc import Callable
    from timeit import timeit
    def benchmark_a_function(func: Callable , value: int ) -> None:
        call = f'{func.__name__}({value})'
        timing = timeit(f'__main__.{call}' , setup="""import __main__""" )
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f'{call:38} -- {timing:.4f} seconds' )
    for value in range(15 ): # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func , value )
    print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
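# Hand-checkable example of the two generators above (exact values):
#
#     generate_pascal_triangle(4)            # -> [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1]]
#     generate_pascal_triangle_optimized(4)  # -> the same four rows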
| 641 |
"""simple docstring"""
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
A_ = namedtuple(
"""_TestCommandArgs""",
[
"""dataset""",
"""name""",
"""cache_dir""",
"""data_dir""",
"""all_configs""",
"""save_infos""",
"""ignore_verifications""",
"""force_redownload""",
"""clear_cache""",
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_apercent_close(source ,target ):
return (abs(source - target ) / target) < 0.01
@pytest.mark.integration
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = _TestCommandArgs(dataset=lowerCAmelCase__ ,all_configs=lowerCAmelCase__ ,save_infos=lowerCAmelCase__ )
lowerCamelCase_ = TestCommand(*lowerCAmelCase__ )
test_command.run()
lowerCamelCase_ = os.path.join(lowerCAmelCase__ ,'''README.md''' )
assert os.path.exists(lowerCAmelCase__ )
dataset_infos = DatasetInfosDict.from_directory(lowerCAmelCase__ )
expected_dataset_infos = DatasetInfosDict(
{
'''default''': DatasetInfo(
features=Features(
{
'''tokens''': Sequence(Value('''string''' ) ),
'''ner_tags''': Sequence(
ClassLabel(names=['''O''', '''B-PER''', '''I-PER''', '''B-ORG''', '''I-ORG''', '''B-LOC''', '''I-LOC'''] ) ),
'''langs''': Sequence(Value('''string''' ) ),
'''spans''': Sequence(Value('''string''' ) ),
} ) ,splits=[
{
'''name''': '''train''',
'''num_bytes''': 2_351_563,
'''num_examples''': 10_000,
},
{
'''name''': '''validation''',
'''num_bytes''': 238_418,
'''num_examples''': 1_000,
},
] ,download_size=3_940_680 ,dataset_size=2_589_981 ,)
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
result, expected = getattr(dataset_infos['''default'''] ,key ), getattr(expected_dataset_infos['''default'''] ,key )
if key == "num_bytes":
    assert is_apercent_close(result ,expected )
elif key == "splits":
    assert list(result ) == list(expected )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes ,expected[split].num_bytes )
else:
result == expected
| 29 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class _snake_case ( __snake_case , unittest.TestCase ):
"""simple docstring"""
a = RoCBertTokenizer
a = None
a = False
a = True
a = filter_non_english
def _lowerCAmelCase ( self : str):
"""simple docstring"""
super().setUp()
_SCREAMING_SNAKE_CASE : List[str] = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """你""", """好""", """是""", """谁""", """a""", """b""", """c""", """d"""]
_SCREAMING_SNAKE_CASE : List[Any] = {}
_SCREAMING_SNAKE_CASE : str = {}
for i, value in enumerate(_A):
_SCREAMING_SNAKE_CASE : List[str] = i
_SCREAMING_SNAKE_CASE : Union[str, Any] = i
_SCREAMING_SNAKE_CASE : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
_SCREAMING_SNAKE_CASE : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""word_shape_file"""])
_SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""word_pronunciation_file"""])
with open(self.vocab_file , """w""" , encoding="""utf-8""") as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens]))
with open(self.word_shape_file , """w""" , encoding="""utf-8""") as word_shape_writer:
json.dump(_A , _A , ensure_ascii=_A)
with open(self.word_pronunciation_file , """w""" , encoding="""utf-8""") as word_pronunciation_writer:
json.dump(_A , _A , ensure_ascii=_A)
def _lowerCAmelCase ( self : Optional[Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Any = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file)
_SCREAMING_SNAKE_CASE : int = tokenizer.tokenize("""你好[SEP]你是谁""")
self.assertListEqual(_A , ["""你""", """好""", """[SEP]""", """你""", """是""", """谁"""])
self.assertListEqual(tokenizer.convert_tokens_to_ids(_A) , [5, 6, 2, 5, 7, 8])
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(_A) , [5, 6, 2, 5, 7, 8])
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(_A) , [5, 6, 2, 5, 7, 8])
def _lowerCAmelCase ( self : List[str]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Dict = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""") , ["""ah""", """\u535A""", """\u63A8""", """zz"""])
def _lowerCAmelCase ( self : Any):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = RoCBertBasicTokenizer(do_lower_case=_A)
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """) , ["""hello""", """!""", """how""", """are""", """you""", """?"""])
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""hello"""])
def _lowerCAmelCase ( self : int):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Dict = RoCBertBasicTokenizer(do_lower_case=_A , strip_accents=_A)
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""])
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""h\u00E9llo"""])
def _lowerCAmelCase ( self : Optional[int]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[str] = RoCBertBasicTokenizer(do_lower_case=_A , strip_accents=_A)
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""])
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""hello"""])
def _lowerCAmelCase ( self : Tuple):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : int = RoCBertBasicTokenizer(do_lower_case=_A)
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""])
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""hello"""])
def _lowerCAmelCase ( self : List[Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[int] = RoCBertBasicTokenizer(do_lower_case=_A)
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""])
def _lowerCAmelCase ( self : List[str]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : int = RoCBertBasicTokenizer(do_lower_case=_A , strip_accents=_A)
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""])
def _lowerCAmelCase ( self : int):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[Any] = RoCBertBasicTokenizer(do_lower_case=_A , strip_accents=_A)
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""])
def _lowerCAmelCase ( self : Optional[int]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[Any] = RoCBertBasicTokenizer(do_lower_case=_A , never_split=["""[UNK]"""])
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""") , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""])
def _lowerCAmelCase ( self : str):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
_SCREAMING_SNAKE_CASE : List[str] = {}
for i, token in enumerate(_A):
_SCREAMING_SNAKE_CASE : List[str] = i
_SCREAMING_SNAKE_CASE : List[Any] = RoCBertWordpieceTokenizer(vocab=_A , unk_token="""[UNK]""")
self.assertListEqual(tokenizer.tokenize("""""") , [])
self.assertListEqual(tokenizer.tokenize("""unwanted running""") , ["""un""", """##want""", """##ed""", """runn""", """##ing"""])
self.assertListEqual(tokenizer.tokenize("""unwantedX running""") , ["""[UNK]""", """runn""", """##ing"""])
def _lowerCAmelCase ( self : int):
"""simple docstring"""
self.assertTrue(_is_whitespace(""" """))
self.assertTrue(_is_whitespace("""\t"""))
self.assertTrue(_is_whitespace("""\r"""))
self.assertTrue(_is_whitespace("""\n"""))
self.assertTrue(_is_whitespace("""\u00A0"""))
self.assertFalse(_is_whitespace("""A"""))
self.assertFalse(_is_whitespace("""-"""))
def _lowerCAmelCase ( self : Dict):
"""simple docstring"""
self.assertTrue(_is_control("""\u0005"""))
self.assertFalse(_is_control("""A"""))
self.assertFalse(_is_control(""" """))
self.assertFalse(_is_control("""\t"""))
self.assertFalse(_is_control("""\r"""))
def _lowerCAmelCase ( self : Optional[Any]):
"""simple docstring"""
self.assertTrue(_is_punctuation("""-"""))
self.assertTrue(_is_punctuation("""$"""))
self.assertTrue(_is_punctuation("""`"""))
self.assertTrue(_is_punctuation("""."""))
self.assertFalse(_is_punctuation("""A"""))
self.assertFalse(_is_punctuation(""" """))
def _lowerCAmelCase ( self : Dict):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[Any] = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(_A) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]])
if self.test_rust_tokenizer:
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(_A) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]])
def _lowerCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
_SCREAMING_SNAKE_CASE : Optional[int] = self.rust_tokenizer_class.from_pretrained(_A , **_A)
_SCREAMING_SNAKE_CASE : str = f"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
_SCREAMING_SNAKE_CASE : int = tokenizer_r.encode_plus(
_A , return_attention_mask=_A , return_token_type_ids=_A , return_offsets_mapping=_A , add_special_tokens=_A , )
_SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r.do_lower_case if hasattr(_A , """do_lower_case""") else False
_SCREAMING_SNAKE_CASE : Dict = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), """Allen"""),
((2_1, 2_3), """##NL"""),
((2_3, 2_4), """##P"""),
((2_5, 3_3), """sentence"""),
((3_3, 3_4), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), """allen"""),
((2_1, 2_3), """##nl"""),
((2_3, 2_4), """##p"""),
((2_5, 3_3), """sentence"""),
((3_3, 3_4), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""]))
self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""])
def _lowerCAmelCase ( self : str):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : int = ["""的""", """人""", """有"""]
_SCREAMING_SNAKE_CASE : Any = """""".join(_A)
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
_SCREAMING_SNAKE_CASE : Dict = True
_SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer_class.from_pretrained(_A , **_A)
_SCREAMING_SNAKE_CASE : int = self.rust_tokenizer_class.from_pretrained(_A , **_A)
_SCREAMING_SNAKE_CASE : Optional[int] = tokenizer_p.encode(_A , add_special_tokens=_A)
_SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_r.encode(_A , add_special_tokens=_A)
_SCREAMING_SNAKE_CASE : Optional[int] = tokenizer_r.convert_ids_to_tokens(_A)
_SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_p.convert_ids_to_tokens(_A)
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(_A , _A)
self.assertListEqual(_A , _A)
_SCREAMING_SNAKE_CASE : Union[str, Any] = False
_SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained(_A , **_A)
_SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer_class.from_pretrained(_A , **_A)
_SCREAMING_SNAKE_CASE : int = tokenizer_r.encode(_A , add_special_tokens=_A)
_SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_p.encode(_A , add_special_tokens=_A)
_SCREAMING_SNAKE_CASE : Tuple = tokenizer_r.convert_ids_to_tokens(_A)
_SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(_A)
# it is expected that only the first Chinese character is not preceded by "##".
_SCREAMING_SNAKE_CASE : List[str] = [
f"""##{token}""" if idx != 0 else token for idx, token in enumerate(_A)
]
self.assertListEqual(_A , _A)
self.assertListEqual(_A , _A)
@slow
def _lowerCAmelCase ( self : Dict):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[str] = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file)
_SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode("""你好""" , add_special_tokens=_A)
_SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode("""你是谁""" , add_special_tokens=_A)
_SCREAMING_SNAKE_CASE : int = tokenizer.build_inputs_with_special_tokens(_A)
_SCREAMING_SNAKE_CASE : List[Any] = tokenizer.build_inputs_with_special_tokens(_A , _A)
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def _lowerCAmelCase ( self : List[str]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : str = self.get_tokenizers(do_lower_case=_A)
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}"""):
_SCREAMING_SNAKE_CASE : List[Any] = """你好,你是谁"""
_SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.tokenize(_A)
_SCREAMING_SNAKE_CASE : List[Any] = tokenizer.convert_tokens_to_ids(_A)
_SCREAMING_SNAKE_CASE : Any = tokenizer.convert_tokens_to_shape_ids(_A)
_SCREAMING_SNAKE_CASE : int = tokenizer.convert_tokens_to_pronunciation_ids(_A)
_SCREAMING_SNAKE_CASE : Dict = tokenizer.prepare_for_model(
_A , _A , _A , add_special_tokens=_A)
_SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode_plus(_A , add_special_tokens=_A)
self.assertEqual(_A , _A)
| 338 |
"""simple docstring"""
from jiwer import compute_measures
import datasets
A_ = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
A_ = """\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
"""
A_ = """
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> wer = datasets.load_metric(\"wer\")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase ( datasets.Metric ):
def UpperCAmelCase__ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
] , )
def UpperCAmelCase__ ( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=False ):
if concatenate_texts:
return compute_measures(UpperCAmelCase , UpperCAmelCase )["wer"]
else:
lowerCamelCase_ = 0
lowerCamelCase_ = 0
for prediction, reference in zip(UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = compute_measures(UpperCAmelCase , UpperCAmelCase )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
| 29 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
def ucal( u : float , p : int ) -> float:
    temp = u
    for i in range(1 , p ):
        temp = temp * (u - i)
    return temp
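# `ucal` supplies the product u * (u - 1) * ... * (u - p + 1) used by Newton's
# forward-difference formula (a sketch of the identity, not additional code):
#   f(x) ≈ y0 + u*Δy0 + u(u-1)/2! * Δ²y0 + ...,  with u = (x - x0) / h
# e.g. ucal(1.5, 2) == 1.5 * (1.5 - 1) == 0.75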
def main( ) -> None:
lowerCAmelCase__ : List[Any] = int(input("enter the numbers of values: " ) )
lowerCAmelCase__ : Optional[Any] = []
for _ in range(lowerCAmelCase__ ):
y.append([] )
for i in range(lowerCAmelCase__ ):
for j in range(lowerCAmelCase__ ):
y[i].append(lowerCAmelCase__ )
lowerCAmelCase__ : Union[str, Any] = 0
print("enter the values of parameters in a list: " )
lowerCAmelCase__ : List[str] = list(map(lowerCAmelCase__ , input().split() ) )
print("enter the values of corresponding parameters: " )
for i in range(lowerCAmelCase__ ):
lowerCAmelCase__ : int = float(input() )
lowerCAmelCase__ : str = int(input("enter the value to interpolate: " ) )
lowerCAmelCase__ : Tuple = (value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1 , lowerCAmelCase__ ):
for j in range(n - i ):
lowerCAmelCase__ : Tuple = y[j + 1][i - 1] - y[j][i - 1]
lowerCAmelCase__ : List[str] = y[0][0]
for i in range(1 , lowerCAmelCase__ ):
summ += (ucal(lowerCAmelCase__ , lowerCAmelCase__ ) * y[0][i]) / math.factorial(lowerCAmelCase__ )
print(F"the value at {value} is {summ}" )
if __name__ == "__main__":
main()
| 308 |
"""simple docstring"""
def reverse_words(input_str: str ) -> str:
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
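# Illustrative example of the helper above: reverse_words("I love Python") -> "Python love I"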
| 29 | 0 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
# TODO: upload to AWS
SCREAMING_SNAKE_CASE : Optional[Any] = {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
),
}
class _lowerCamelCase( _a ):
lowercase_ : Optional[int] = 'retribert'
def __init__( self, vocab_size=3_05_22, hidden_size=7_68, num_hidden_layers=8, num_attention_heads=12, intermediate_size=30_72, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=5_12, type_vocab_size=2, initializer_range=0.0_2, layer_norm_eps=1E-12, share_encoders=True, projection_dim=1_28, pad_token_id=0, **kwargs ) -> None:
    """simple docstring"""
    super().__init__(pad_token_id=pad_token_id, **kwargs)
    self.vocab_size = vocab_size
    self.hidden_size = hidden_size
    self.num_hidden_layers = num_hidden_layers
    self.num_attention_heads = num_attention_heads
    self.hidden_act = hidden_act
    self.intermediate_size = intermediate_size
    self.hidden_dropout_prob = hidden_dropout_prob
    self.attention_probs_dropout_prob = attention_probs_dropout_prob
    self.max_position_embeddings = max_position_embeddings
    self.type_vocab_size = type_vocab_size
    self.initializer_range = initializer_range
    self.layer_norm_eps = layer_norm_eps
    self.share_encoders = share_encoders
    self.projection_dim = projection_dim
| 89 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class __lowerCamelCase ( unittest.TestCase ):
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = jnp.ones((batch_size, length) ) / length
return scores
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = None
lowerCamelCase_ = 20
lowerCamelCase_ = self._get_uniform_logits(batch_size=2 , length=UpperCAmelCase )
# tweak scores to not be uniform anymore
lowerCamelCase_ = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
lowerCamelCase_ = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
lowerCamelCase_ = jax.nn.softmax(UpperCAmelCase , axis=-1 )
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=1.3 )
lowerCamelCase_ = jax.nn.softmax(temp_dist_warper_sharper(UpperCAmelCase , scores.copy() , cur_len=UpperCAmelCase ) , axis=-1 )
lowerCamelCase_ = jax.nn.softmax(temp_dist_warper_smoother(UpperCAmelCase , scores.copy() , cur_len=UpperCAmelCase ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = None
lowerCamelCase_ = 10
lowerCamelCase_ = 2
# create ramp distribution
lowerCamelCase_ = np.broadcast_to(np.arange(UpperCAmelCase )[None, :] , (batch_size, vocab_size) ).copy()
lowerCamelCase_ = ramp_logits[1:, : vocab_size // 2] + vocab_size
lowerCamelCase_ = FlaxTopKLogitsWarper(3 )
lowerCamelCase_ = top_k_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
lowerCamelCase_ = 5
lowerCamelCase_ = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
lowerCamelCase_ = np.broadcast_to(np.arange(UpperCAmelCase )[None, :] , (batch_size, length) ).copy()
lowerCamelCase_ = top_k_warp_safety_check(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = None
lowerCamelCase_ = 10
lowerCamelCase_ = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
lowerCamelCase_ = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.1_5, 0.3, 0.3, 0.2_5]] ) )
lowerCamelCase_ = FlaxTopPLogitsWarper(0.8 )
lowerCamelCase_ = np.exp(top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
lowerCamelCase_ = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.2_5]] )
self.assertTrue(np.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
# check edge cases with negative and extreme logits
lowerCamelCase_ = np.broadcast_to(np.arange(UpperCAmelCase )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
lowerCamelCase_ = ramp_logits[1] * 1_0_0.0
# make sure at least 2 tokens are kept
lowerCamelCase_ = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
lowerCamelCase_ = top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 20
lowerCamelCase_ = 4
lowerCamelCase_ = 0
lowerCamelCase_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCAmelCase )
# check that min length is applied at length 5
lowerCamelCase_ = ids_tensor((batch_size, 20) , vocab_size=20 )
lowerCamelCase_ = 5
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = min_dist_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] )
# check that min length is not applied anymore at length 15
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = 15
lowerCamelCase_ = min_dist_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertFalse(jnp.isinf(UpperCAmelCase ).any() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 20
lowerCamelCase_ = 4
lowerCamelCase_ = 0
lowerCamelCase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase )
# check that all scores are -inf except the bos_token_id score
lowerCamelCase_ = ids_tensor((batch_size, 1) , vocab_size=20 )
lowerCamelCase_ = 1
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
lowerCamelCase_ = 3
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertFalse(jnp.isinf(UpperCAmelCase ).any() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 20
lowerCamelCase_ = 4
lowerCamelCase_ = 0
lowerCamelCase_ = 5
lowerCamelCase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase , eos_token_id=UpperCAmelCase )
# check that all scores are -inf except the eos_token_id when max_length is reached
lowerCamelCase_ = ids_tensor((batch_size, 4) , vocab_size=20 )
lowerCamelCase_ = 4
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
lowerCamelCase_ = 3
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertFalse(jnp.isinf(UpperCAmelCase ).any() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 4
lowerCamelCase_ = 10
lowerCamelCase_ = 15
lowerCamelCase_ = 2
lowerCamelCase_ = 1
lowerCamelCase_ = 15
# dummy input_ids and scores
lowerCamelCase_ = ids_tensor((batch_size, sequence_length) , UpperCAmelCase )
lowerCamelCase_ = input_ids.copy()
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = scores.copy()
# instantiate all dist processors
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase_ = FlaxTopKLogitsWarper(3 )
lowerCamelCase_ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCamelCase_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCAmelCase )
lowerCamelCase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase )
lowerCamelCase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase , eos_token_id=UpperCAmelCase )
lowerCamelCase_ = 10
# no processor list
lowerCamelCase_ = temp_dist_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = top_k_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = min_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = bos_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = eos_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# with processor list
lowerCamelCase_ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCamelCase_ = processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 4
lowerCamelCase_ = 10
lowerCamelCase_ = 15
lowerCamelCase_ = 2
lowerCamelCase_ = 1
lowerCamelCase_ = 15
# dummy input_ids and scores
lowerCamelCase_ = ids_tensor((batch_size, sequence_length) , UpperCAmelCase )
lowerCamelCase_ = input_ids.copy()
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = scores.copy()
# instantiate all dist processors
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase_ = FlaxTopKLogitsWarper(3 )
lowerCamelCase_ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCamelCase_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCAmelCase )
lowerCamelCase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase )
lowerCamelCase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase , eos_token_id=UpperCAmelCase )
lowerCamelCase_ = 10
# no processor list
def run_no_processor_list(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = temp_dist_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = top_k_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = min_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = bos_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = eos_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
return scores
# with processor list
def run_processor_list(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCamelCase_ = processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
return scores
lowerCamelCase_ = jax.jit(UpperCAmelCase )
lowerCamelCase_ = jax.jit(UpperCAmelCase )
lowerCamelCase_ = jitted_run_no_processor_list(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = jitted_run_processor_list(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
| 29 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase : str = {
"configuration_time_series_transformer": [
"TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TimeSeriesTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Optional[Any] = [
"TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimeSeriesTransformerForPrediction",
"TimeSeriesTransformerModel",
"TimeSeriesTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 644 |
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def lowercase ( lowerCAmelCase__ ):
def wrapper(*lowerCAmelCase__ ,**lowerCAmelCase__ ):
lowerCamelCase_ = timeit.default_timer()
lowerCamelCase_ = func(*lowerCAmelCase__ ,**lowerCAmelCase__ )
lowerCamelCase_ = timeit.default_timer() - starttime
return delta
lowerCamelCase_ = func.__name__
return wrapper
def generate_examples(features ,num_examples=100 ,seq_shapes=None ):
lowerCamelCase_ = []
lowerCamelCase_ = seq_shapes or {}
for i in range(lowerCAmelCase__ ):
lowerCamelCase_ = {}
for col_id, (k, v) in enumerate(features.items() ):
if isinstance(lowerCAmelCase__ ,_ArrayXD ):
lowerCamelCase_ = np.random.rand(*v.shape ).astype(v.dtype )
elif isinstance(lowerCAmelCase__ ,datasets.Value ):
if v.dtype == "string":
lowerCamelCase_ = '''The small grey turtle was surprisingly fast when challenged.'''
else:
lowerCamelCase_ = np.random.randint(10 ,size=1 ).astype(v.dtype ).item()
elif isinstance(lowerCAmelCase__ ,datasets.Sequence ):
while isinstance(lowerCAmelCase__ ,datasets.Sequence ):
lowerCamelCase_ = v.feature
lowerCamelCase_ = seq_shapes[k]
lowerCamelCase_ = np.random.rand(*lowerCAmelCase__ ).astype(v.dtype )
lowerCamelCase_ = data
dummy_data.append((i, example) )
return dummy_data
def generate_example_dataset(dataset_path ,features ,num_examples=100 ,seq_shapes=None ):
lowerCamelCase_ = generate_examples(lowerCAmelCase__ ,num_examples=lowerCAmelCase__ ,seq_shapes=lowerCAmelCase__ )
with ArrowWriter(features=lowerCAmelCase__ ,path=lowerCAmelCase__ ) as writer:
for key, record in dummy_data:
lowerCamelCase_ = features.encode_example(lowerCAmelCase__ )
writer.write(lowerCAmelCase__ )
lowerCamelCase_ , lowerCamelCase_ = writer.finalize()
if not num_final_examples == num_examples:
raise ValueError(
f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}." )
lowerCamelCase_ = datasets.Dataset.from_file(filename=lowerCAmelCase__ ,info=datasets.DatasetInfo(features=lowerCAmelCase__ ) )
return dataset
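# Intended usage sketch of the helpers above (the path and feature spec are illustrative only):
#
#     features = datasets.Features({"text": datasets.Value("string"), "label": datasets.Value("int32")})
#     dataset = generate_example_dataset("/tmp/dummy_dataset.arrow", features, num_examples=10)
#     # `dataset` is then a 10-row datasets.Dataset backed by the Arrow file just written.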
| 29 | 0 |
"""simple docstring"""
from math import ceil
def solution( n = 1_0_0_1 ) -> int:
    total = 1
    for i in range(1 , int(ceil(n / 2.0 ) ) ):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
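# Why the update above works (worked equation, no extra code): the ring added at step i has
# side length odd = 2*i + 1, and its four corner values are
#   odd**2, odd**2 - 2*i, odd**2 - 4*i, odd**2 - 6*i
# which sum to 4*odd**2 - 12*i = 4*odd**2 - 6*even  (with even = 2*i),
# exactly the amount added to `total` on each iteration.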
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
_UpperCamelCase = int(sys.argv[1])
print(solution(n))
except ValueError:
print("""Invalid entry - please enter a number""")
| 453 |
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
A_ = [
"""good first issue""",
"""feature request""",
"""wip""",
]
def lowercase ( ):
g = Github(os.environ['''GITHUB_TOKEN'''] )
repo = g.get_repo('''huggingface/accelerate''' )
open_issues = repo.get_issues(state='''open''' )
for issue in open_issues:
    comments = sorted([comment for comment in issue.get_comments()] ,key=lambda i : i.created_at ,reverse=True )
    last_comment = comments[0] if len(comments ) > 0 else None
    current_time = dt.utcnow()
    days_since_updated = (current_time - issue.updated_at).days
    days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state='''closed''' )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main()
| 29 | 0 |
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type,
    generator_name_or_path,
    question_encoder_name_or_path,
    dest_dir,
    config_name_or_path=None,
    generator_tokenizer_name_or_path=None,
    question_encoder_tokenizer_name_or_path=None,
):
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)
    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config
    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_type",
choices=["rag_sequence", "rag_token"],
required=True,
type=str,
help="RAG model type: rag_sequence, rag_token",
)
parser.add_argument("--dest", type=str, required=True, help="Path to the output checkpoint directory.")
parser.add_argument("--generator_name_or_path", type=str, required=True, help="Generator model identifier")
parser.add_argument(
"--question_encoder_name_or_path", type=str, required=True, help="Question encoder model identifier"
)
parser.add_argument(
"--generator_tokenizer_name_or_path",
type=str,
help="Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``",
)
parser.add_argument(
"--question_encoder_tokenizer_name_or_path",
type=str,
help="Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``",
)
parser.add_argument(
"--config_name_or_path",
type=str,
help=(
"Identifier of the model config to use, if not provided, resolves to a base config for a given"
" ``model_type``"
),
)
    args = parser.parse_args()
    dest_dir = Path(args.dest)
    dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
    )
| 518 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]
    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config",
        default="facebook/mbart-large-cc25",
        type=str,
        help="Which huggingface architecture to use: mbart-large",
    )
    parser.add_argument("--mbart_50", action="store_true", help="whether the model is an mBART-50 checkpoint")
    parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
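
# Example invocation (illustrative; the script name and the checkpoint/output paths below are
# placeholders, not taken from the original file):
#   python convert_mbart_checkpoint.py /path/to/fairseq/model.pt ./mbart-hf --hf_config facebook/mbart-large-cc25 --finetuned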
| 29 | 0 |
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_SCREAMING_SNAKE_CASE : int = namedtuple(
'_TestCommandArgs',
[
'dataset',
'name',
'cache_dir',
'data_dir',
'all_configs',
'save_infos',
'ignore_verifications',
'force_redownload',
'clear_cache',
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_apercent_close(source, target):
    return (abs(source - target) / target) < 0.01
@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):
    args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    readme_path = os.path.join(dataset_loading_script_dir, "README.md")
    assert os.path.exists(readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
    expected_dataset_infos = DatasetInfosDict(
{
"default": DatasetInfo(
features=Features(
{
"tokens": Sequence(Value("string" ) ),
"ner_tags": Sequence(
ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] ) ),
"langs": Sequence(Value("string" ) ),
"spans": Sequence(Value("string" ) ),
} ) , splits=[
{
"name": "train",
"num_bytes": 2_3_5_1_5_6_3,
"num_examples": 1_0_0_0_0,
},
{
"name": "validation",
"num_bytes": 2_3_8_4_1_8,
"num_examples": 1_0_0_0,
},
] , download_size=3_9_4_0_6_8_0 , dataset_size=2_5_8_9_9_8_1 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
if key == "num_bytes":
            assert is_apercent_close(result, expected)
elif key == "splits":
            assert list(result) == list(expected)
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes )
else:
            assert result == expected
| 226 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A_ = {
"""configuration_nllb_moe""": [
"""NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""NllbMoeConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"""NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NllbMoeForConditionalGeneration""",
"""NllbMoeModel""",
"""NllbMoePreTrainedModel""",
"""NllbMoeTop2Router""",
"""NllbMoeSparseMLP""",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
NllbMoeTopaRouter,
)
else:
import sys
A_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 29 | 0 |
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
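

# Illustrative usage (an added sketch, not part of the original module): this formatter is the
# backend that `datasets` selects when a dataset is put into "torch" format, e.g.
#
#     import datasets
#     ds = datasets.Dataset.from_dict({"x": [[1, 2], [3, 4]]}).with_format("torch")
#     ds[0]["x"]  # tensor([1, 2])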
| 668 |
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config
def UpperCAmelCase__ ( self ):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=UpperCAmelCase , beta_end=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
self.check_over_configs(thresholding=UpperCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=UpperCAmelCase , prediction_type=UpperCAmelCase , sample_max_value=UpperCAmelCase , )
def UpperCAmelCase__ ( self ):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for t in [0, 500, 999]:
self.check_over_forward(time_step=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1e-5
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = len(UpperCAmelCase )
lowerCamelCase_ = self.dummy_model()
lowerCamelCase_ = self.dummy_sample_deter
lowerCamelCase_ = torch.manual_seed(0 )
for t in reversed(range(UpperCAmelCase ) ):
# 1. predict noise residual
lowerCamelCase_ = model(UpperCAmelCase , UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
lowerCamelCase_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , generator=UpperCAmelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCamelCase_ = pred_prev_sample
lowerCamelCase_ = torch.sum(torch.abs(UpperCAmelCase ) )
lowerCamelCase_ = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config(prediction_type='''v_prediction''' )
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = len(UpperCAmelCase )
lowerCamelCase_ = self.dummy_model()
lowerCamelCase_ = self.dummy_sample_deter
lowerCamelCase_ = torch.manual_seed(0 )
for t in reversed(range(UpperCAmelCase ) ):
# 1. predict noise residual
lowerCamelCase_ = model(UpperCAmelCase , UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
lowerCamelCase_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , generator=UpperCAmelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCamelCase_ = pred_prev_sample
lowerCamelCase_ = torch.sum(torch.abs(UpperCAmelCase ) )
lowerCamelCase_ = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=UpperCAmelCase )
lowerCamelCase_ = scheduler.timesteps
for i, timestep in enumerate(UpperCAmelCase ):
if i == len(UpperCAmelCase ) - 1:
lowerCamelCase_ = -1
else:
lowerCamelCase_ = timesteps[i + 1]
lowerCamelCase_ = scheduler.previous_timestep(UpperCAmelCase )
lowerCamelCase_ = prev_t.item()
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = [100, 87, 50, 51, 0]
with self.assertRaises(UpperCAmelCase , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = [100, 87, 50, 1, 0]
lowerCamelCase_ = len(UpperCAmelCase )
with self.assertRaises(UpperCAmelCase , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=UpperCAmelCase , timesteps=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = [scheduler.config.num_train_timesteps]
with self.assertRaises(
UpperCAmelCase , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ):
scheduler.set_timesteps(timesteps=UpperCAmelCase )
| 29 | 0 |
"""Kruskal's algorithm for the minimum spanning tree of an undirected weighted graph,
implemented on top of a disjoint-set (union-find) forest."""
from __future__ import annotations

from typing import Generic, TypeVar

T = TypeVar("T")


class DisjointSetTreeNode(Generic[T]):
    # Disjoint Set Node to store the parent and rank
    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self
        self.rank = 0


class DisjointSetTree(Generic[T]):
    # Disjoint Set data structure
    def __init__(self) -> None:
        # map from node name to the node object
        self.map: dict[T, DisjointSetTreeNode[T]] = {}

    def make_set(self, data: T) -> None:
        # create a new set with x as its member
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        # find the set x belongs to (with path-compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]) -> None:
        # helper function for union operation
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        # merge 2 disjoint sets
        self.link(self.find_set(data1), self.find_set(data2))


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        # connections: map from the node to the neighbouring nodes (with weights)
        self.connections: dict[T, dict[T, int]] = {}

    def add_node(self, node: T) -> None:
        # add a node ONLY if its not present in the graph
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # add an edge with the given weight
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self) -> GraphUndirectedWeighted[T]:
        # Kruskal's algorithm to generate a Minimum Spanning Tree (MST) of the graph
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])

        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)

        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
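

# Illustrative usage (an added sketch, not part of the original module): the minimum spanning
# tree of a 3-node triangle keeps the two cheapest edges and drops the heaviest one.
if __name__ == "__main__":
    g = GraphUndirectedWeighted[int]()
    g.add_edge(1, 2, 1)
    g.add_edge(2, 3, 2)
    g.add_edge(1, 3, 3)
    mst = g.kruskal()
    print(mst.connections)  # the (1, 3) edge of weight 3 is not part of the MST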
| 640 |
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
A_ = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
| 29 | 0 |
"""Root-mean-square (rms) speed of gas molecules: vrms = sqrt(3 * R * T / M)."""
UNIVERSAL_GAS_CONSTANT = 8.3144598


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()
    # example
    temperature = 300
    molar_mass = 28
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
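
# Added sanity check (illustrative, not in the original script): with the molar mass given in
# kg/mol as the error message above requires, nitrogen (0.028 kg/mol) at 300 K comes out at
# roughly 517 m/s.
assert 500 < rms_speed_of_molecule(300, 0.028) < 530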
| 330 |
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False

try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )

default_cache_path = os.path.join(torch_cache_home, "transformers")

CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(",")[0].lower().strip())

    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(",")[0].lower().strip())
    return vg_classes, vg_attrs


def load_checkpoint(ckp_path):
    r = OrderedDict()
    with open(ckp_path, "rb") as f:
        ckp = pkl.load(f)["model"]
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.tensor), type(v)
        r[k] = v
    return r
class __lowerCamelCase :
a__: Union[str, Any] = {}
def __init__( self , UpperCAmelCase , UpperCAmelCase = "root" , UpperCAmelCase=0 ):
lowerCamelCase_ = name
lowerCamelCase_ = level
lowerCamelCase_ = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
lowerCamelCase_ = copy.deepcopy(UpperCAmelCase )
lowerCamelCase_ = copy.deepcopy(UpperCAmelCase )
if isinstance(UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = Config(UpperCAmelCase , name=UpperCAmelCase , level=level + 1 )
lowerCamelCase_ = v
setattr(self , UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = d
def __repr__( self ):
return str(list((self._pointer.keys()) ) )
def __setattr__( self , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = val
lowerCamelCase_ = val
lowerCamelCase_ = key.split('''.''' )
lowerCamelCase_ = len(UpperCAmelCase ) - 1
lowerCamelCase_ = self._pointer
if len(UpperCAmelCase ) > 1:
for i, l in enumerate(UpperCAmelCase ):
if hasattr(self , UpperCAmelCase ) and isinstance(getattr(self , UpperCAmelCase ) , UpperCAmelCase ):
setattr(getattr(self , UpperCAmelCase ) , '''.'''.join(levels[i:] ) , UpperCAmelCase )
if l == last_level:
lowerCamelCase_ = val
else:
lowerCamelCase_ = pointer[l]
def UpperCAmelCase__ ( self ):
return self._pointer
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase ):
with open(f"{file_name}" , '''w''' ) as stream:
dump(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase ):
with open(f"{file_name}" , '''w''' ) as stream:
json.dump(UpperCAmelCase , UpperCAmelCase )
@staticmethod
def UpperCAmelCase__ ( UpperCAmelCase ):
with open(UpperCAmelCase ) as stream:
lowerCamelCase_ = load(UpperCAmelCase , Loader=UpperCAmelCase )
return data
def __str__( self ):
lowerCamelCase_ = ''' '''
if self._name != "root":
lowerCamelCase_ = f"{t * (self._level-1)}{self._name}:\n"
else:
lowerCamelCase_ = ''''''
lowerCamelCase_ = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(UpperCAmelCase , UpperCAmelCase ):
r += f"{t * (self._level)}{v}\n"
self._level += 1
else:
r += f"{t * (self._level)}{k}: {v} ({type(UpperCAmelCase ).__name__})\n"
lowerCamelCase_ = level
return r[:-1]
@classmethod
def UpperCAmelCase__ ( cls , UpperCAmelCase , **UpperCAmelCase ):
lowerCamelCase_ , lowerCamelCase_ = cls.get_config_dict(UpperCAmelCase , **UpperCAmelCase )
return cls(UpperCAmelCase )
@classmethod
def UpperCAmelCase__ ( cls , UpperCAmelCase , **UpperCAmelCase ):
lowerCamelCase_ = kwargs.pop('''cache_dir''' , UpperCAmelCase )
lowerCamelCase_ = kwargs.pop('''force_download''' , UpperCAmelCase )
lowerCamelCase_ = kwargs.pop('''resume_download''' , UpperCAmelCase )
lowerCamelCase_ = kwargs.pop('''proxies''' , UpperCAmelCase )
lowerCamelCase_ = kwargs.pop('''local_files_only''' , UpperCAmelCase )
if os.path.isdir(UpperCAmelCase ):
lowerCamelCase_ = os.path.join(UpperCAmelCase , UpperCAmelCase )
elif os.path.isfile(UpperCAmelCase ) or is_remote_url(UpperCAmelCase ):
lowerCamelCase_ = pretrained_model_name_or_path
else:
lowerCamelCase_ = hf_bucket_url(UpperCAmelCase , filename=UpperCAmelCase , use_cdn=UpperCAmelCase )
try:
# Load from URL or cache if already cached
lowerCamelCase_ = cached_path(
UpperCAmelCase , cache_dir=UpperCAmelCase , force_download=UpperCAmelCase , proxies=UpperCAmelCase , resume_download=UpperCAmelCase , local_files_only=UpperCAmelCase , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
lowerCamelCase_ = Config.load_yaml(UpperCAmelCase )
except EnvironmentError:
lowerCamelCase_ = '''Can\'t load config for'''
raise EnvironmentError(UpperCAmelCase )
if resolved_config_file == config_file:
print('''loading configuration file from path''' )
else:
print('''loading configuration file cache''' )
return Config.load_yaml(UpperCAmelCase ), kwargs
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = torch.load('''dump.pt''' ,map_location=in_tensor.device )
lowerCamelCase_ = in_tensor.numpy()
lowerCamelCase_ = out_tensor.numpy()[0]
print(na.shape ,na[0, 0, :5] )
print(na.shape ,na[0, 0, :5] )
assert np.allclose(lowerCAmelCase__ ,lowerCAmelCase__ ,rtol=0.01 ,atol=0.1 ), (
f"{sum([1 for x in np.isclose(lowerCAmelCase__ ,lowerCAmelCase__ ,rtol=0.01 ,atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %"
" element-wise mismatch"
)
raise Exception('''tensors are all good''' )
# Hugging face functions below
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = urlparse(lowerCAmelCase__ )
return parsed.scheme in ("http", "https")
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=True ):
lowerCamelCase_ = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
lowerCamelCase_ = '''/''' not in model_id
if legacy_format:
return f"{endpoint}/{model_id}-{filename}"
else:
return f"{endpoint}/{model_id}/{filename}"
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=None ,lowerCAmelCase__=0 ,lowerCAmelCase__=None ,):
lowerCamelCase_ = '''python/{}'''.format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
ua += "; " + "; ".join('''{}/{}'''.format(lowerCAmelCase__ ,lowerCAmelCase__ ) for k, v in user_agent.items() )
elif isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
ua += "; " + user_agent
lowerCamelCase_ = {'''user-agent''': ua}
if resume_size > 0:
lowerCamelCase_ = '''bytes=%d-''' % (resume_size,)
lowerCamelCase_ = requests.get(lowerCAmelCase__ ,stream=lowerCAmelCase__ ,proxies=lowerCAmelCase__ ,headers=lowerCAmelCase__ )
if response.status_code == 416: # Range not satisfiable
return
lowerCamelCase_ = response.headers.get('''Content-Length''' )
lowerCamelCase_ = resume_size + int(lowerCAmelCase__ ) if content_length is not None else None
lowerCamelCase_ = tqdm(
unit='''B''' ,unit_scale=lowerCAmelCase__ ,total=lowerCAmelCase__ ,initial=lowerCAmelCase__ ,desc='''Downloading''' ,)
for chunk in response.iter_content(chunk_size=1_024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(lowerCAmelCase__ ) )
temp_file.write(lowerCAmelCase__ )
progress.close()
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,lowerCAmelCase__=None ,lowerCAmelCase__=10 ,lowerCAmelCase__=False ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,):
if cache_dir is None:
lowerCamelCase_ = TRANSFORMERS_CACHE
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = str(lowerCAmelCase__ )
os.makedirs(lowerCAmelCase__ ,exist_ok=lowerCAmelCase__ )
lowerCamelCase_ = None
if not local_files_only:
try:
lowerCamelCase_ = requests.head(lowerCAmelCase__ ,allow_redirects=lowerCAmelCase__ ,proxies=lowerCAmelCase__ ,timeout=lowerCAmelCase__ )
if response.status_code == 200:
lowerCamelCase_ = response.headers.get('''ETag''' )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
lowerCamelCase_ = url_to_filename(lowerCAmelCase__ ,lowerCAmelCase__ )
# get cache path to put the file
lowerCamelCase_ = os.path.join(lowerCAmelCase__ ,lowerCAmelCase__ )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(lowerCAmelCase__ ):
return cache_path
else:
lowerCamelCase_ = [
file
for file in fnmatch.filter(os.listdir(lowerCAmelCase__ ) ,filename + '''.*''' )
if not file.endswith('''.json''' ) and not file.endswith('''.lock''' )
]
if len(lowerCAmelCase__ ) > 0:
return os.path.join(lowerCAmelCase__ ,matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
'''Cannot find the requested files in the cached path and outgoing traffic has been'''
''' disabled. To enable model look-ups and downloads online, set \'local_files_only\''''
''' to False.''' )
return None
# From now on, etag is not None.
if os.path.exists(lowerCAmelCase__ ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
lowerCamelCase_ = cache_path + '''.lock'''
with FileLock(lowerCAmelCase__ ):
# If the download just completed while the lock was activated.
if os.path.exists(lowerCAmelCase__ ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
lowerCamelCase_ = cache_path + '''.incomplete'''
@contextmanager
def _resumable_file_manager():
with open(lowerCAmelCase__ ,'''a+b''' ) as f:
yield f
lowerCamelCase_ = _resumable_file_manager
if os.path.exists(lowerCAmelCase__ ):
lowerCamelCase_ = os.stat(lowerCAmelCase__ ).st_size
else:
lowerCamelCase_ = 0
else:
lowerCamelCase_ = partial(tempfile.NamedTemporaryFile ,dir=lowerCAmelCase__ ,delete=lowerCAmelCase__ )
lowerCamelCase_ = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
'''%s not found in cache or force_download set to True, downloading to %s''' ,lowerCAmelCase__ ,temp_file.name ,)
http_get(
lowerCAmelCase__ ,lowerCAmelCase__ ,proxies=lowerCAmelCase__ ,resume_size=lowerCAmelCase__ ,user_agent=lowerCAmelCase__ ,)
os.replace(temp_file.name ,lowerCAmelCase__ )
lowerCamelCase_ = {'''url''': url, '''etag''': etag}
lowerCamelCase_ = cache_path + '''.json'''
with open(lowerCAmelCase__ ,'''w''' ) as meta_file:
json.dump(lowerCAmelCase__ ,lowerCAmelCase__ )
return cache_path
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=None ):
lowerCamelCase_ = url.encode('''utf-8''' )
lowerCamelCase_ = shaaaa(lowerCAmelCase__ )
lowerCamelCase_ = url_hash.hexdigest()
if etag:
lowerCamelCase_ = etag.encode('''utf-8''' )
lowerCamelCase_ = shaaaa(lowerCAmelCase__ )
filename += "." + etag_hash.hexdigest()
if url.endswith('''.h5''' ):
filename += ".h5"
return filename
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,lowerCAmelCase__=False ,lowerCAmelCase__=False ,):
if cache_dir is None:
lowerCamelCase_ = TRANSFORMERS_CACHE
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = str(lowerCAmelCase__ )
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = str(lowerCAmelCase__ )
if is_remote_url(lowerCAmelCase__ ):
# URL, so get it from the cache (downloading if necessary)
lowerCamelCase_ = get_from_cache(
lowerCAmelCase__ ,cache_dir=lowerCAmelCase__ ,force_download=lowerCAmelCase__ ,proxies=lowerCAmelCase__ ,resume_download=lowerCAmelCase__ ,user_agent=lowerCAmelCase__ ,local_files_only=lowerCAmelCase__ ,)
elif os.path.exists(lowerCAmelCase__ ):
# File, and it exists.
lowerCamelCase_ = url_or_filename
elif urlparse(lowerCAmelCase__ ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError('''file {} not found'''.format(lowerCAmelCase__ ) )
else:
# Something unknown
raise ValueError('''unable to parse {} as a URL or as a local path'''.format(lowerCAmelCase__ ) )
if extract_compressed_file:
if not is_zipfile(lowerCAmelCase__ ) and not tarfile.is_tarfile(lowerCAmelCase__ ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
lowerCamelCase_ , lowerCamelCase_ = os.path.split(lowerCAmelCase__ )
lowerCamelCase_ = output_file.replace('''.''' ,'''-''' ) + '''-extracted'''
lowerCamelCase_ = os.path.join(lowerCAmelCase__ ,lowerCAmelCase__ )
if os.path.isdir(lowerCAmelCase__ ) and os.listdir(lowerCAmelCase__ ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
lowerCamelCase_ = output_path + '''.lock'''
with FileLock(lowerCAmelCase__ ):
shutil.rmtree(lowerCAmelCase__ ,ignore_errors=lowerCAmelCase__ )
os.makedirs(lowerCAmelCase__ )
if is_zipfile(lowerCAmelCase__ ):
with ZipFile(lowerCAmelCase__ ,'''r''' ) as zip_file:
zip_file.extractall(lowerCAmelCase__ )
zip_file.close()
elif tarfile.is_tarfile(lowerCAmelCase__ ):
lowerCamelCase_ = tarfile.open(lowerCAmelCase__ )
tar_file.extractall(lowerCAmelCase__ )
tar_file.close()
else:
raise EnvironmentError('''Archive format of {} could not be identified'''.format(lowerCAmelCase__ ) )
return output_path_extracted
return output_path
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__="," ):
assert isinstance(lowerCAmelCase__ ,lowerCAmelCase__ )
if os.path.isfile(lowerCAmelCase__ ):
with open(lowerCAmelCase__ ) as f:
lowerCamelCase_ = eval(f.read() )
else:
lowerCamelCase_ = requests.get(lowerCAmelCase__ )
try:
lowerCamelCase_ = requests.json()
except Exception:
lowerCamelCase_ = req.content.decode()
assert data is not None, "could not connect"
try:
lowerCamelCase_ = eval(lowerCAmelCase__ )
except Exception:
lowerCamelCase_ = data.split('''\n''' )
req.close()
return data
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = requests.get(lowerCAmelCase__ )
lowerCamelCase_ = np.array(Image.open(BytesIO(response.content ) ) )
return img
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = url.split('''/''' )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(lowerCAmelCase__ )
with open(lowerCAmelCase__ ,'''rb''' ) as stream:
lowerCamelCase_ = pkl.load(lowerCAmelCase__ )
lowerCamelCase_ = weights.pop('''model''' )
lowerCamelCase_ = {}
for k, v in model.items():
lowerCamelCase_ = torch.from_numpy(lowerCAmelCase__ )
if "running_var" in k:
lowerCamelCase_ = torch.tensor([0] )
lowerCamelCase_ = k.replace('''running_var''' ,'''num_batches_tracked''' )
lowerCamelCase_ = zero
return new
def lowercase ( ):
print(f"{os.path.abspath(os.path.join(lowerCAmelCase__ ,os.pardir ) )}/demo.ipynb" )
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__="RGB" ):
assert isinstance(lowerCAmelCase__ ,lowerCAmelCase__ )
if os.path.isfile(lowerCAmelCase__ ):
lowerCamelCase_ = cva.imread(lowerCAmelCase__ )
else:
lowerCamelCase_ = get_image_from_url(lowerCAmelCase__ )
assert img is not None, f"could not connect to: {im}"
lowerCamelCase_ = cva.cvtColor(lowerCAmelCase__ ,cva.COLOR_BGR2RGB )
if input_format == "RGB":
lowerCamelCase_ = img[:, :, ::-1]
return img
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=1 ):
return (images[i : i + batch] for i in range(0 ,len(lowerCAmelCase__ ) ,lowerCAmelCase__ ))
| 29 | 0 |
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowercase : Tuple = logging.get_logger(__name__)
_lowercase : List[Any] = "▁"
_lowercase : List[Any] = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}
_lowercase : str = {
"sentencepiece_model_file": "sentencepiece.bpe.model",
"vocab_file": "vocab.txt",
}
_lowercase : Optional[int] = {
"vocab_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
},
"sentencepiece_model_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
},
}
_lowercase : Optional[int] = {
"ernie-m-base": 514,
"ernie-m-large": 514,
}
_lowercase : str = {
"ernie-m-base": {"do_lower_case": False},
"ernie-m-large": {"do_lower_case": False},
}
class _UpperCamelCase ( __snake_case ):
"""simple docstring"""
lowerCAmelCase = ["input_ids"]
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = RESOURCE_FILES_NAMES
def __init__( self , a__ , a__=None , a__=False , a__="utf8" , a__="[UNK]" , a__="[SEP]" , a__="[PAD]" , a__="[CLS]" , a__="[MASK]" , a__ = None , **a__ , ) -> str:
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
A = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=a__ , unk_token=a__ , sep_token=a__ , pad_token=a__ , cls_token=a__ , mask_token=a__ , vocab_file=a__ , encoding=a__ , sp_model_kwargs=self.sp_model_kwargs , **a__ , )
A = do_lower_case
A = sentencepiece_model_ckpt
A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(a__ )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
A = self.load_vocab(filepath=a__ )
else:
A = {self.sp_model.id_to_piece(a__ ): id for id in range(self.sp_model.get_piece_size() )}
A = {v: k for k, v in self.vocab.items()}
def _UpperCAmelCase ( self , a__ ) -> List[Any]:
if text is None:
return None
A = self.tokenize(a__ )
A , A = """""", []
for i, ch in enumerate(a__ ):
if ch in self.SP_CHAR_MAPPING:
A = self.SP_CHAR_MAPPING.get(a__ )
else:
A = unicodedata.normalize("""NFKC""" , a__ )
if self.is_whitespace(a__ ):
continue
normalized_text += ch
char_mapping.extend([i] * len(a__ ) )
A , A , A = normalized_text, [], 0
if self.do_lower_case:
A = text.lower()
for token in split_tokens:
if token[:1] == "▁":
A = token[1:]
A = text[offset:].index(a__ ) + offset
A = start + len(a__ )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
A = end
return token_mapping
@property
def _UpperCAmelCase ( self ) -> Any:
return len(self.vocab )
def _UpperCAmelCase ( self ) -> Any:
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self ) -> Optional[int]:
A = self.__dict__.copy()
A = None
return state
def __setstate__( self , a__ ) -> Optional[int]:
A = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
A = {}
A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def _UpperCAmelCase ( self , a__ ) -> Optional[Any]:
return "".join((self.SP_CHAR_MAPPING.get(a__ , a__ ) for c in text) )
def _UpperCAmelCase ( self , a__ , a__=False , a__=64 , a__=0.1 ) -> Union[str, Any]:
if self.sp_model_kwargs.get("""enable_sampling""" ) is True:
A = True
if self.sp_model_kwargs.get("""alpha""" ) is not None:
A = self.sp_model_kwargs.get("""alpha""" )
if self.sp_model_kwargs.get("""nbest_size""" ) is not None:
A = self.sp_model_kwargs.get("""nbest_size""" )
if not enable_sampling:
A = self.sp_model.EncodeAsPieces(a__ )
else:
A = self.sp_model.SampleEncodeAsPieces(a__ , a__ , a__ )
A = []
for pi, piece in enumerate(a__ ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(a__ ) and pi != 0:
new_pieces.append(a__ )
continue
else:
continue
A = 0
for i, chunk in enumerate(a__ ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(a__ ) or self.is_punct(a__ ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(a__ )
A = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
A = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
A = i
if len(a__ ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def _UpperCAmelCase ( self , a__ ) -> str:
A = """""".join(a__ ).replace(a__ , """ """ ).strip()
return out_string
def _UpperCAmelCase ( self , a__ ) -> str:
A = self.convert_ids_to_tokens(a__ )
A = """""".join(a__ ).replace(a__ , """ """ ).strip()
return out_string
def _UpperCAmelCase ( self , a__ ) -> Any:
return self.vocab.get(a__ , self.vocab.get(self.unk_token ) )
def _UpperCAmelCase ( self , a__ ) -> Any:
return self.reverse_vocab.get(a__ , self.unk_token )
def _UpperCAmelCase ( self , a__ , a__=None ) -> Any:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
A = [self.cls_token_id]
A = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def _UpperCAmelCase ( self , a__ , a__=None ) -> List[str]:
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def _UpperCAmelCase ( self , a__ , a__=None , a__=False ) -> Any:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"""You should not supply a second sequence if the provided sequence of """
"""ids is already formatted with special tokens for the model.""" )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(a__ )) + [1, 1] + ([0] * len(a__ )) + [1]
return [1] + ([0] * len(a__ )) + [1]
def _UpperCAmelCase ( self , a__ , a__ = None ) -> List[str]:
# called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
if token_ids_a is None:
# [CLS] X [SEP]
return (len(a__ ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(a__ ) + 1) + [1] * (len(a__ ) + 3)
def _UpperCAmelCase ( self , a__ ) -> List[Any]:
if "\u4e00" <= char <= "\u9fff":
return True
return False
def _UpperCAmelCase ( self , a__ ) -> Optional[int]:
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def _UpperCAmelCase ( self , a__ ) -> Any:
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def _UpperCAmelCase ( self , a__ ) -> Optional[int]:
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(a__ ) == 1:
A = unicodedata.category(a__ )
if cat == "Zs":
return True
return False
def _UpperCAmelCase ( self , a__ ) -> Optional[Any]:
A = {}
with io.open(a__ , """r""" , encoding="""utf-8""" ) as f:
for index, line in enumerate(a__ ):
A = line.rstrip("""\n""" )
A = int(a__ )
return token_to_idx
def _UpperCAmelCase ( self , a__ , a__ = None ) -> List[str]:
A = 0
if os.path.isdir(a__ ):
A = os.path.join(
a__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
else:
A = (filename_prefix + """-""" if filename_prefix else """""") + save_directory
with open(a__ , """w""" , encoding="""utf-8""" ) as writer:
for token, token_index in sorted(self.vocab.items() , key=lambda a__ : kv[1] ):
if index != token_index:
logger.warning(
f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
""" Please check that the vocabulary is not corrupted!""" )
A = token_index
writer.write(token + """\n""" )
index += 1
A = os.path.join(a__ , """sentencepiece.bpe.model""" )
with open(a__ , """wb""" ) as fi:
A = self.sp_model.serialized_model_proto()
fi.write(a__ )
return (vocab_file,)
| 641 |
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.17.0.dev0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
A_ = logging.getLogger(__name__)
@dataclass
class __lowerCamelCase :
a__: Optional[str] = field(
default='tab_fact' , metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
a__: Optional[str] = field(
default='tab_fact' , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} , )
a__: int = field(
default=1024 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
a__: bool = field(
default=lowerCAmelCase , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
a__: bool = field(
default=lowerCAmelCase , metadata={
'help': (
'Whether to pad all samples to `max_seq_length`. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
)
} , )
a__: Optional[int] = field(
default=lowerCAmelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
a__: Optional[int] = field(
default=lowerCAmelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
a__: Optional[int] = field(
default=lowerCAmelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of prediction examples to this '
'value if set.'
)
} , )
a__: Optional[str] = field(
default=lowerCAmelCase , metadata={'help': 'A csv or a json file containing the training data.'} )
a__: Optional[str] = field(
default=lowerCAmelCase , metadata={'help': 'A csv or a json file containing the validation data.'} )
a__: Optional[str] = field(default=lowerCAmelCase , metadata={'help': 'A csv or a json file containing the test data.'} )
def UpperCAmelCase__ ( self ):
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError('''Need either a GLUE task, a training/validation file or a dataset name.''' )
else:
lowerCamelCase_ = self.train_file.split('''.''' )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
lowerCamelCase_ = self.validation_file.split('''.''' )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class __lowerCamelCase :
a__: str = field(
default=lowerCAmelCase , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
a__: Optional[str] = field(
default=lowerCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
a__: Optional[str] = field(
default=lowerCAmelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
a__: Optional[str] = field(
default=lowerCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
a__: bool = field(
default=lowerCAmelCase , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
a__: str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
a__: bool = field(
default=lowerCAmelCase , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
def lowercase ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' ,datefmt='''%m/%d/%Y %H:%M:%S''' ,handlers=[logging.StreamHandler(sys.stdout )] ,)
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}")
logger.info(f"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
lowerCamelCase_ = load_dataset(
data_args.dataset_name ,data_args.dataset_config_name ,cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
lowerCamelCase_ = {'''train''': data_args.train_file, '''validation''': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
lowerCamelCase_ = data_args.train_file.split('''.''' )[-1]
lowerCamelCase_ = data_args.test_file.split('''.''' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
lowerCamelCase_ = data_args.test_file
else:
raise ValueError('''Need either a GLUE task or a test file for `do_predict`.''' )
for key in data_files.keys():
logger.info(f"load a local file for {key}: {data_files[key]}" )
if data_args.train_file.endswith('''.csv''' ):
# Loading a dataset from local csv files
lowerCamelCase_ = load_dataset('''csv''' ,data_files=lowerCAmelCase__ ,cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
lowerCamelCase_ = load_dataset('''json''' ,data_files=lowerCAmelCase__ ,cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
lowerCamelCase_ = raw_datasets['''train'''].features['''label'''].names
lowerCamelCase_ = len(lowerCAmelCase__ )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCamelCase_ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path ,num_labels=lowerCAmelCase__ ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
# load tapex tokenizer
lowerCamelCase_ = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,use_fast=model_args.use_fast_tokenizer ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,add_prefix_space=lowerCAmelCase__ ,)
lowerCamelCase_ = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path ,from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) ,config=lowerCAmelCase__ ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
# Padding strategy
if data_args.pad_to_max_length:
lowerCamelCase_ = '''max_length'''
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
lowerCamelCase_ = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
lowerCamelCase_ = {'''Refused''': 0, '''Entailed''': 1}
lowerCamelCase_ = {0: '''Refused''', 1: '''Entailed'''}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
lowerCamelCase_ = min(data_args.max_seq_length ,tokenizer.model_max_length )
def preprocess_tabfact_function(lowerCAmelCase__ ):
# Tokenize the texts
def _convert_table_text_to_pandas(lowerCAmelCase__ ):
lowerCamelCase_ = [_table_row.split('''#''' ) for _table_row in _table_text.strip('''\n''' ).split('''\n''' )]
lowerCamelCase_ = pd.DataFrame.from_records(_table_content[1:] ,columns=_table_content[0] )
return _table_pd
lowerCamelCase_ = examples['''statement''']
lowerCamelCase_ = list(map(_convert_table_text_to_pandas ,examples['''table_text'''] ) )
lowerCamelCase_ = tokenizer(lowerCAmelCase__ ,lowerCAmelCase__ ,padding=lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,truncation=lowerCAmelCase__ )
lowerCamelCase_ = examples['''label''']
return result
with training_args.main_process_first(desc='''dataset map pre-processing''' ):
lowerCamelCase_ = raw_datasets.map(
lowerCAmelCase__ ,batched=lowerCAmelCase__ ,load_from_cache_file=not data_args.overwrite_cache ,desc='''Running tokenizer on dataset''' ,)
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
lowerCamelCase_ = raw_datasets['''train''']
if data_args.max_train_samples is not None:
lowerCamelCase_ = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
lowerCamelCase_ = raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
lowerCamelCase_ = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('''--do_predict requires a test dataset''' )
lowerCamelCase_ = raw_datasets['''test''']
if data_args.max_predict_samples is not None:
lowerCamelCase_ = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(lowerCAmelCase__ ) ) ,3 ):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}." )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(lowerCAmelCase__ ):
        lowerCamelCase_ = p.predictions[0] if isinstance(p.predictions ,tuple ) else p.predictions
        lowerCamelCase_ = np.argmax(lowerCAmelCase__ ,axis=1 )
        return {"accuracy": (preds == p.label_ids).astype(np.float32 ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
lowerCamelCase_ = default_data_collator
    elif training_args.fp16:
lowerCamelCase_ = DataCollatorWithPadding(lowerCAmelCase__ ,pad_to_multiple_of=8 )
else:
lowerCamelCase_ = None
# Initialize our Trainer
lowerCamelCase_ = Trainer(
model=lowerCAmelCase__ ,args=lowerCAmelCase__ ,train_dataset=train_dataset if training_args.do_train else None ,eval_dataset=eval_dataset if training_args.do_eval else None ,compute_metrics=lowerCAmelCase__ ,tokenizer=lowerCAmelCase__ ,data_collator=lowerCAmelCase__ ,)
# Training
if training_args.do_train:
lowerCamelCase_ = None
if training_args.resume_from_checkpoint is not None:
lowerCamelCase_ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCamelCase_ = last_checkpoint
lowerCamelCase_ = trainer.train(resume_from_checkpoint=lowerCAmelCase__ )
lowerCamelCase_ = train_result.metrics
lowerCamelCase_ = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCAmelCase__ )
)
lowerCamelCase_ = min(lowerCAmelCase__ ,len(lowerCAmelCase__ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('''train''' ,lowerCAmelCase__ )
trainer.save_metrics('''train''' ,lowerCAmelCase__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowerCamelCase_ = trainer.evaluate(eval_dataset=lowerCAmelCase__ )
lowerCamelCase_ = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCAmelCase__ )
lowerCamelCase_ = min(lowerCAmelCase__ ,len(lowerCAmelCase__ ) )
trainer.log_metrics('''eval''' ,lowerCAmelCase__ )
trainer.save_metrics('''eval''' ,lowerCAmelCase__ )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
lowerCamelCase_ = predict_dataset.remove_columns('''label''' )
lowerCamelCase_ = trainer.predict(lowerCAmelCase__ ,metric_key_prefix='''predict''' ).predictions
lowerCamelCase_ = np.argmax(lowerCAmelCase__ ,axis=1 )
lowerCamelCase_ = os.path.join(training_args.output_dir ,'''predict_results_tabfact.txt''' )
if trainer.is_world_process_zero():
with open(lowerCAmelCase__ ,'''w''' ) as writer:
logger.info('''***** Predict Results *****''' )
writer.write('''index\tprediction\n''' )
for index, item in enumerate(lowerCAmelCase__ ):
lowerCamelCase_ = label_list[item]
writer.write(f"{index}\t{item}\n" )
lowerCamelCase_ = {'''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''text-classification'''}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCAmelCase__ )
else:
trainer.create_model_card(**lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
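# Illustrative sketch (kept separate from the training entry point above): how the `#`-delimited
# `table_text` strings are turned into pandas DataFrames before being handed to the TAPEX
# tokenizer together with the statement. The table content below is made up for demonstration.
import pandas as pd


def _demo_table_text_to_pandas(table_text):
    # the first row holds the column names, every following row holds one record
    rows = [row.split("#") for row in table_text.strip("\n").split("\n")]
    return pd.DataFrame.from_records(rows[1:], columns=rows[0])


# _demo_table_text_to_pandas("year#city\n2020#Tokyo\n2024#Paris")
# -> a DataFrame with columns ["year", "city"] and two rows.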
| 29 | 0 |
"""simple docstring"""
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort `sequence` in place between indices `start` and `end` (inclusive) using slowsort."""
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    # recursively sort both halves of the range
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    # move the larger of the two "maximum candidates" to the end of the range
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    # sort the remaining range, which no longer contains its maximum
    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
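    # Minimal usage sketch (illustrative values): slowsort rearranges the list in place.
    example_values = [5, 2, 9, 1]
    slowsort(example_values)
    print(example_values)  # expected output: [1, 2, 5, 9]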
| 338 |
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class __lowerCamelCase ( unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' )
lowerCamelCase_ = AutoTokenizer.from_pretrained('''google/mt5-small''' )
lowerCamelCase_ = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids
lowerCamelCase_ = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids
lowerCamelCase_ = shift_tokens_right(UpperCAmelCase , model.config.pad_token_id , model.config.decoder_start_token_id )
lowerCamelCase_ = model(UpperCAmelCase , decoder_input_ids=UpperCAmelCase ).logits
lowerCamelCase_ = optax.softmax_cross_entropy(UpperCAmelCase , onehot(UpperCAmelCase , logits.shape[-1] ) ).mean()
lowerCamelCase_ = -(labels.shape[-1] * loss.item())
        lowerCamelCase_ = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
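# Illustrative sketch (kept outside the test class above): `shift_tokens_right` builds the
# decoder inputs from the labels by prepending the decoder start token and dropping the last
# position, so the decoder predicts token t from the tokens before t (the real helper also
# replaces -100 with the pad token id). The ids below are made-up values, not real mT5 token ids.
import numpy as np

_demo_labels = np.array([[7, 8, 9]])
_demo_decoder_start_token_id = 0
_demo_shifted = np.concatenate([[[_demo_decoder_start_token_id]], _demo_labels[:, :-1]], axis=1)
assert _demo_shifted.tolist() == [[0, 7, 8]]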
| 29 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
logger = logging.get_logger(__name__)


class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        # Deprecation shim: warn at construction time, then behave exactly like the new image processor class.
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeformableDetrImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 308 |
"""simple docstring"""
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = generate_pascal_triangle(lowerCAmelCase__ )
for row_idx in range(lowerCAmelCase__ ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=''' ''' )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] ,end=''' ''' )
else:
print(triangle[row_idx][col_idx] ,end='''''' )
print()
def lowercase ( lowerCAmelCase__ ):
if not isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
raise TypeError('''The input value of \'num_rows\' should be \'int\'''' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'''The input value of \'num_rows\' should be greater than or equal to 0''' )
lowerCamelCase_ = []
for current_row_idx in range(lowerCAmelCase__ ):
lowerCamelCase_ = populate_current_row(lowerCAmelCase__ ,lowerCAmelCase__ )
triangle.append(lowerCAmelCase__ )
return triangle
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
lowerCamelCase_ , lowerCamelCase_ = 1, 1
for current_col_idx in range(1 ,lowerCAmelCase__ ):
calculate_current_element(
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
return current_row
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,):
lowerCamelCase_ = triangle[current_row_idx - 1][current_col_idx - 1]
lowerCamelCase_ = triangle[current_row_idx - 1][current_col_idx]
lowerCamelCase_ = above_to_left_elt + above_to_right_elt
def lowercase ( lowerCAmelCase__ ):
if not isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
raise TypeError('''The input value of \'num_rows\' should be \'int\'''' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'''The input value of \'num_rows\' should be greater than or equal to 0''' )
lowerCamelCase_ = [[1]]
for row_index in range(1 ,lowerCAmelCase__ ):
lowerCamelCase_ = [0] + result[-1] + [0]
lowerCamelCase_ = row_index + 1
# Calculate the number of distinct elements in a row
lowerCamelCase_ = sum(divmod(lowerCAmelCase__ ,2 ) )
lowerCamelCase_ = [
temp_row[i - 1] + temp_row[i] for i in range(1 ,distinct_elements + 1 )
]
lowerCamelCase_ = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
lowerCamelCase_ = row_first_half + row_second_half
result.append(lowerCAmelCase__ )
return result
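# Illustrative, self-contained sketch of the row-update rule both generators above rely on:
# each new row is the element-wise sum of the previous row shifted against itself.
def _demo_next_pascal_row(previous_row):
    padded = [0] + previous_row + [0]
    return [padded[i] + padded[i + 1] for i in range(len(previous_row) + 1)]


# _demo_next_pascal_row([1, 3, 3, 1]) -> [1, 4, 6, 4, 1]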
def lowercase ( ):
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(lowerCAmelCase__ ,lowerCAmelCase__ ) -> None:
lowerCamelCase_ = f"{func.__name__}({value})"
lowerCamelCase_ = timeit(f"__main__.{call}" ,setup='''import __main__''' )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(f"{call:38} -- {timing:.4f} seconds" )
for value in range(15 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(lowerCAmelCase__ ,lowerCAmelCase__ )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 29 | 0 |
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
SCREAMING_SNAKE_CASE : Tuple = datasets.logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : str = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n"
SCREAMING_SNAKE_CASE : Tuple = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n"
SCREAMING_SNAKE_CASE : List[Any] = "\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting 'keep_singletons=False', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n 'mentions': mentions\n 'muc': MUC metric [Vilain et al, 1995]\n 'bcub': B-cubed [Bagga and Baldwin, 1998]\n 'ceafe': CEAFe [Luo et al., 2005]\n 'lea': LEA [Moosavi and Strube, 2016]\n 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric('coval')\n >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',\n ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',\n ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',\n ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',\n ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',\n ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n"
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=False , lowerCamelCase_=False , lowerCamelCase_=True , lowerCamelCase_=False , lowerCamelCase_="dummy_doc" ) -> str:
_lowercase : Optional[Any] = {doc: key_lines}
_lowercase : Optional[Any] = {doc: sys_lines}
_lowercase : int = {}
_lowercase : Union[str, Any] = 0
_lowercase : Dict = 0
_lowercase : Union[str, Any] = 0
_lowercase : Optional[Any] = 0
_lowercase : Optional[Any] = 0
_lowercase : Any = 0
_lowercase , _lowercase : Union[str, Any] = reader.get_doc_mentions(lowerCAmelCase__ , key_doc_lines[doc] , lowerCAmelCase__ )
key_singletons_num += singletons_num
if NP_only or min_span:
_lowercase : Dict = reader.set_annotated_parse_trees(lowerCAmelCase__ , key_doc_lines[doc] , lowerCAmelCase__ , lowerCAmelCase__ )
_lowercase , _lowercase : Optional[Any] = reader.get_doc_mentions(lowerCAmelCase__ , sys_doc_lines[doc] , lowerCAmelCase__ )
sys_singletons_num += singletons_num
if NP_only or min_span:
_lowercase : Union[str, Any] = reader.set_annotated_parse_trees(lowerCAmelCase__ , key_doc_lines[doc] , lowerCAmelCase__ , lowerCAmelCase__ )
if remove_nested:
_lowercase , _lowercase : Optional[int] = reader.remove_nested_coref_mentions(lowerCAmelCase__ , lowerCAmelCase__ )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
_lowercase , _lowercase : int = reader.remove_nested_coref_mentions(lowerCAmelCase__ , lowerCAmelCase__ )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
_lowercase : int = reader.get_mention_assignments(lowerCAmelCase__ , lowerCAmelCase__ )
_lowercase : Union[str, Any] = reader.get_mention_assignments(lowerCAmelCase__ , lowerCAmelCase__ )
_lowercase : Dict = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
'Number of removed nested coreferring mentions in the key '
F'''annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}''' )
logger.info(
'Number of resulting singleton clusters in the key '
F'''annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}''' )
if not keep_singletons:
logger.info(
F'''{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '''
'files, respectively' )
return doc_coref_infos
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Any:
_lowercase : Dict = get_coref_infos(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
_lowercase : str = {}
_lowercase : Any = 0
_lowercase : Union[str, Any] = 0
for name, metric in metrics:
_lowercase , _lowercase , _lowercase : Any = evaluator.evaluate_documents(lowerCAmelCase__ , lowerCAmelCase__ , beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({F'''{name}/recall''': recall, F'''{name}/precision''': precision, F'''{name}/f1''': fa} )
logger.info(
name.ljust(10 ) , F'''Recall: {recall * 100:.2f}''' , F''' Precision: {precision * 100:.2f}''' , F''' F1: {fa * 100:.2f}''' , )
if conll_subparts_num == 3:
_lowercase : Optional[Any] = (conll / 3) * 100
logger.info(F'''CoNLL score: {conll:.2f}''' )
output_scores.update({'conll_score': conll} )
return output_scores
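# Illustrative sketch (made-up F1 values): the reported `conll_score` computed above is the
# arithmetic mean of the MUC, B-cubed and CEAFe F1 values, expressed as a percentage.
def _demo_conll_score(muc_f1, bcub_f1, ceafe_f1):
    return (muc_f1 + bcub_f1 + ceafe_f1) / 3 * 100


# _demo_conll_score(0.70, 0.60, 0.50) -> 60.0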
def UpperCamelCase_( lowerCamelCase_ ) -> Tuple:
_lowercase : str = False
for line in key_lines:
if not line.startswith('#' ):
if len(line.split() ) > 6:
_lowercase : Optional[Any] = line.split()[5]
if not parse_col == "-":
_lowercase : Optional[Any] = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class _lowerCamelCase( datasets.Metric ):
def UpperCamelCase ( self) -> int:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string')),
'references': datasets.Sequence(datasets.Value('string')),
}), codebase_urls=['https://github.com/ns-moosavi/coval'], reference_urls=[
'https://github.com/ns-moosavi/coval',
'https://www.aclweb.org/anthology/P16-1060',
'http://www.conll.cemantix.org/2012/data.html',
], )
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase=True, lowerCamelCase=False, lowerCamelCase=False, lowerCamelCase=False) -> str:
"""simple docstring"""
_lowercase : Dict = [
('mentions', evaluator.mentions),
('muc', evaluator.muc),
('bcub', evaluator.b_cubed),
('ceafe', evaluator.ceafe),
('lea', evaluator.lea),
]
if min_span:
_lowercase : str = util.check_gold_parse_annotation(lowerCamelCase)
if not has_gold_parse:
raise NotImplementedError('References should have gold parse annotation to use \'min_span\'.')
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
_lowercase : Tuple = evaluate(
key_lines=lowerCamelCase, sys_lines=lowerCamelCase, metrics=lowerCamelCase, NP_only=lowerCamelCase, remove_nested=lowerCamelCase, keep_singletons=lowerCamelCase, min_span=lowerCamelCase, )
return score
| 89 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class __lowerCamelCase ( unittest.TestCase ):
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
lowerCamelCase_ = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
            '''image_mean''': [0.48145466, 0.4578275, 0.40821073],
            '''image_std''': [0.26862954, 0.26130258, 0.27577711],
}
lowerCamelCase_ = os.path.join(self.tmpdirname , UpperCAmelCase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
return BertTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
return BertTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def UpperCAmelCase__ ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase__ ( self ):
        lowerCamelCase_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
lowerCamelCase_ = [Image.fromarray(np.moveaxis(UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = self.get_rust_tokenizer()
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
processor_slow.save_pretrained(self.tmpdirname )
lowerCamelCase_ = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCAmelCase )
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
processor_fast.save_pretrained(self.tmpdirname )
lowerCamelCase_ = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , UpperCAmelCase )
self.assertIsInstance(processor_fast.tokenizer , UpperCAmelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , UpperCAmelCase )
self.assertIsInstance(processor_fast.image_processor , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase_ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
lowerCamelCase_ = self.get_image_processor(do_normalize=UpperCAmelCase , padding_value=1.0 )
lowerCamelCase_ = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = image_processor(UpperCAmelCase , return_tensors='''np''' )
lowerCamelCase_ = processor(images=UpperCAmelCase , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = '''lower newer'''
lowerCamelCase_ = processor(text=UpperCAmelCase )
lowerCamelCase_ = tokenizer(UpperCAmelCase , padding='''max_length''' , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = '''lower newer'''
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = processor(text=UpperCAmelCase , images=UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase ):
processor()
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase_ = processor.batch_decode(UpperCAmelCase )
lowerCamelCase_ = tokenizer.batch_decode(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = '''lower newer'''
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = processor(text=UpperCAmelCase , images=UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
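# Illustrative usage sketch (not an additional test case): a single processor call with both
# `text` and `images` returns the tokenizer outputs (input_ids, token_type_ids, attention_mask)
# together with the image processor output (pixel_values), which is what the assertions above check.
# processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)
# batch = processor(text="a photo of a cat", images=image)
# sorted(batch.keys()) -> ['attention_mask', 'input_ids', 'pixel_values', 'token_type_ids']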
| 29 | 0 |
"""simple docstring"""
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class a_ ( __UpperCamelCase ):
UpperCamelCase_ : List[Any] = (DPMSolverSDEScheduler,)
UpperCamelCase_ : Any = 10
def _SCREAMING_SNAKE_CASE ( self : List[str] , **snake_case__ : int ):
lowerCAmelCase__ = {
"""num_train_timesteps""": 1100,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""noise_sampler_seed""": 0,
}
config.update(**snake_case__ )
return config
def _SCREAMING_SNAKE_CASE ( self : Dict ):
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Any ):
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=snake_case__ , beta_end=snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
lowerCAmelCase__ = self.scheduler_classes[0]
lowerCAmelCase__ = self.get_scheduler_config()
lowerCAmelCase__ = scheduler_class(**snake_case__ )
scheduler.set_timesteps(self.num_inference_steps )
lowerCAmelCase__ = self.dummy_model()
lowerCAmelCase__ = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCAmelCase__ = sample.to(snake_case__ )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase__ = scheduler.scale_model_input(snake_case__ , snake_case__ )
lowerCAmelCase__ = model(snake_case__ , snake_case__ )
lowerCAmelCase__ = scheduler.step(snake_case__ , snake_case__ , snake_case__ )
lowerCAmelCase__ = output.prev_sample
lowerCAmelCase__ = torch.sum(torch.abs(snake_case__ ) )
lowerCAmelCase__ = torch.mean(torch.abs(snake_case__ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47821044921875 ) < 1E-2
assert abs(result_mean.item() - 0.2178_7059_6456_5277 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59352111816406 ) < 1E-2
assert abs(result_mean.item() - 0.2_2342_9068_9229_9652 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52383422851562 ) < 1E-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1E-3
def _SCREAMING_SNAKE_CASE ( self : Dict ):
lowerCAmelCase__ = self.scheduler_classes[0]
lowerCAmelCase__ = self.get_scheduler_config(prediction_type="""v_prediction""" )
lowerCAmelCase__ = scheduler_class(**snake_case__ )
scheduler.set_timesteps(self.num_inference_steps )
lowerCAmelCase__ = self.dummy_model()
lowerCAmelCase__ = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCAmelCase__ = sample.to(snake_case__ )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase__ = scheduler.scale_model_input(snake_case__ , snake_case__ )
lowerCAmelCase__ = model(snake_case__ , snake_case__ )
lowerCAmelCase__ = scheduler.step(snake_case__ , snake_case__ , snake_case__ )
lowerCAmelCase__ = output.prev_sample
lowerCAmelCase__ = torch.sum(torch.abs(snake_case__ ) )
lowerCAmelCase__ = torch.mean(torch.abs(snake_case__ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77149200439453 ) < 1E-2
assert abs(result_mean.item() - 0.1_6226_2890_1481_6284 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1663360595703 ) < 1E-2
assert abs(result_mean.item() - 0.1_6688_3260_0116_7297 ) < 1E-3
else:
assert abs(result_sum.item() - 119.8487548828125 ) < 1E-2
assert abs(result_mean.item() - 0.1560_5306_6253_6621 ) < 1E-3
def _SCREAMING_SNAKE_CASE ( self : str ):
lowerCAmelCase__ = self.scheduler_classes[0]
lowerCAmelCase__ = self.get_scheduler_config()
lowerCAmelCase__ = scheduler_class(**snake_case__ )
scheduler.set_timesteps(self.num_inference_steps , device=snake_case__ )
lowerCAmelCase__ = self.dummy_model()
lowerCAmelCase__ = self.dummy_sample_deter.to(snake_case__ ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
lowerCAmelCase__ = scheduler.scale_model_input(snake_case__ , snake_case__ )
lowerCAmelCase__ = model(snake_case__ , snake_case__ )
lowerCAmelCase__ = scheduler.step(snake_case__ , snake_case__ , snake_case__ )
lowerCAmelCase__ = output.prev_sample
lowerCAmelCase__ = torch.sum(torch.abs(snake_case__ ) )
lowerCAmelCase__ = torch.mean(torch.abs(snake_case__ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46957397460938 ) < 1E-2
assert abs(result_mean.item() - 0.2_1805_9346_0798_2635 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59353637695312 ) < 1E-2
assert abs(result_mean.item() - 0.2_2342_9083_8241_5771 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52383422851562 ) < 1E-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1E-3
def _SCREAMING_SNAKE_CASE ( self : int ):
lowerCAmelCase__ = self.scheduler_classes[0]
lowerCAmelCase__ = self.get_scheduler_config()
lowerCAmelCase__ = scheduler_class(**snake_case__ , use_karras_sigmas=snake_case__ )
scheduler.set_timesteps(self.num_inference_steps , device=snake_case__ )
lowerCAmelCase__ = self.dummy_model()
lowerCAmelCase__ = self.dummy_sample_deter.to(snake_case__ ) * scheduler.init_noise_sigma
lowerCAmelCase__ = sample.to(snake_case__ )
for t in scheduler.timesteps:
lowerCAmelCase__ = scheduler.scale_model_input(snake_case__ , snake_case__ )
lowerCAmelCase__ = model(snake_case__ , snake_case__ )
lowerCAmelCase__ = scheduler.step(snake_case__ , snake_case__ , snake_case__ )
lowerCAmelCase__ = output.prev_sample
lowerCAmelCase__ = torch.sum(torch.abs(snake_case__ ) )
lowerCAmelCase__ = torch.mean(torch.abs(snake_case__ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66974135742188 ) < 1E-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1E-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63653564453125 ) < 1E-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1E-2
else:
assert abs(result_sum.item() - 170.3135223388672 ) < 1E-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1E-2
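# Illustrative sketch of the sampling-loop pattern the tests above exercise (shown as comments,
# not as an extra test; `model`, `initial_noise`, `num_inference_steps` and `device` are assumed to exist):
# scheduler.set_timesteps(num_inference_steps, device=device)
# sample = initial_noise * scheduler.init_noise_sigma
# for t in scheduler.timesteps:
#     model_input = scheduler.scale_model_input(sample, t)
#     noise_pred = model(model_input, t)
#     sample = scheduler.step(noise_pred, t, sample).prev_sample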
| 644 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2]
lowerCamelCase_ = True if '''large''' in model_name or '''huge''' in model_name else False
lowerCamelCase_ = True if '''large''' in model_name or '''huge''' in model_name else False
lowerCamelCase_ = True if '''large''' in model_name or '''huge''' in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
lowerCamelCase_ = [3, 3, 3, 3]
lowerCamelCase_ = [5, 5, 5, 5]
elif "fl4" in model_name:
lowerCamelCase_ = [4, 4, 4, 4]
lowerCamelCase_ = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
lowerCamelCase_ = [3, 3, 3, 3]
if "lrf" in model_name:
lowerCamelCase_ = [3, 3, 3, 3]
else:
lowerCamelCase_ = [2, 2, 2, 2]
if "tiny" in model_name:
lowerCamelCase_ = 96
elif "small" in model_name:
lowerCamelCase_ = 96
elif "base" in model_name:
lowerCamelCase_ = 128
elif "large" in model_name:
lowerCamelCase_ = 192
elif "xlarge" in model_name:
lowerCamelCase_ = 256
elif "huge" in model_name:
lowerCamelCase_ = 352
# set label information
lowerCamelCase_ = '''huggingface/label-files'''
if "large" in model_name or "huge" in model_name:
lowerCamelCase_ = '''imagenet-22k-id2label.json'''
else:
lowerCamelCase_ = '''imagenet-1k-id2label.json'''
lowerCamelCase_ = json.load(open(hf_hub_download(lowerCAmelCase__ ,lowerCAmelCase__ ,repo_type='''dataset''' ) ,'''r''' ) )
lowerCamelCase_ = {int(lowerCAmelCase__ ): v for k, v in idalabel.items()}
lowerCamelCase_ = {v: k for k, v in idalabel.items()}
lowerCamelCase_ = FocalNetConfig(
embed_dim=lowerCAmelCase__ ,depths=lowerCAmelCase__ ,focal_levels=lowerCAmelCase__ ,focal_windows=lowerCAmelCase__ ,use_conv_embed=lowerCAmelCase__ ,idalabel=lowerCAmelCase__ ,labelaid=lowerCAmelCase__ ,use_post_layernorm=lowerCAmelCase__ ,use_layerscale=lowerCAmelCase__ ,)
return config
def lowercase ( lowerCAmelCase__ ):
if "patch_embed.proj" in name:
lowerCamelCase_ = name.replace('''patch_embed.proj''' ,'''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
lowerCamelCase_ = name.replace('''patch_embed.norm''' ,'''embeddings.norm''' )
if "layers" in name:
lowerCamelCase_ = '''encoder.''' + name
if "encoder.layers" in name:
lowerCamelCase_ = name.replace('''encoder.layers''' ,'''encoder.stages''' )
if "downsample.proj" in name:
lowerCamelCase_ = name.replace('''downsample.proj''' ,'''downsample.projection''' )
if "blocks" in name:
lowerCamelCase_ = name.replace('''blocks''' ,'''layers''' )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
lowerCamelCase_ = name.replace('''modulation.f''' ,'''modulation.projection_in''' )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
lowerCamelCase_ = name.replace('''modulation.h''' ,'''modulation.projection_context''' )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
lowerCamelCase_ = name.replace('''modulation.proj''' ,'''modulation.projection_out''' )
if name == "norm.weight":
lowerCamelCase_ = '''layernorm.weight'''
if name == "norm.bias":
lowerCamelCase_ = '''layernorm.bias'''
if "head" in name:
lowerCamelCase_ = name.replace('''head''' ,'''classifier''' )
else:
lowerCamelCase_ = '''focalnet.''' + name
return name
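# Two illustrative renamings produced by the function above (derived directly from its
# replacement rules; the checkpoint keys themselves are only examples):
# "patch_embed.proj.weight" -> "focalnet.embeddings.patch_embeddings.projection.weight"
# "layers.0.blocks.0.modulation.f.weight"
#     -> "focalnet.encoder.stages.0.layers.0.modulation.projection_in.weight"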
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=False ):
# fmt: off
lowerCamelCase_ = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
lowerCamelCase_ = model_name_to_url[model_name]
print('''Checkpoint URL: ''' ,lowerCAmelCase__ )
lowerCamelCase_ = torch.hub.load_state_dict_from_url(lowerCAmelCase__ ,map_location='''cpu''' )['''model''']
# rename keys
for key in state_dict.copy().keys():
lowerCamelCase_ = state_dict.pop(lowerCAmelCase__ )
lowerCamelCase_ = val
lowerCamelCase_ = get_focalnet_config(lowerCAmelCase__ )
lowerCamelCase_ = FocalNetForImageClassification(lowerCAmelCase__ )
model.eval()
# load state dict
model.load_state_dict(lowerCAmelCase__ )
# verify conversion
lowerCamelCase_ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCamelCase_ = BitImageProcessor(
do_resize=lowerCAmelCase__ ,size={'''shortest_edge''': 256} ,resample=PILImageResampling.BILINEAR ,do_center_crop=lowerCAmelCase__ ,crop_size=224 ,do_normalize=lowerCAmelCase__ ,image_mean=lowerCAmelCase__ ,image_std=lowerCAmelCase__ ,)
lowerCamelCase_ = Image.open(requests.get(lowerCAmelCase__ ,stream=lowerCAmelCase__ ).raw )
lowerCamelCase_ = processor(images=lowerCAmelCase__ ,return_tensors='''pt''' )
lowerCamelCase_ = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406] ,std=[0.229, 0.224, 0.225] ),
] )
lowerCamelCase_ = image_transforms(lowerCAmelCase__ ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values ,lowerCAmelCase__ ,atol=1E-4 )
lowerCamelCase_ = model(**lowerCAmelCase__ )
lowerCamelCase_ = outputs.logits.argmax(-1 ).item()
print('''Predicted class:''' ,model.config.idalabel[predicted_class_idx] )
print('''First values of logits:''' ,outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
lowerCamelCase_ = torch.tensor([0.2_166, -0.4_368, 0.2_191] )
elif model_name == "focalnet-tiny-lrf":
lowerCamelCase_ = torch.tensor([1.1_669, 0.0_125, -0.1_695] )
elif model_name == "focalnet-small":
lowerCamelCase_ = torch.tensor([0.4_917, -0.0_430, 0.1_341] )
elif model_name == "focalnet-small-lrf":
lowerCamelCase_ = torch.tensor([-0.2_588, -0.5_342, -0.2_331] )
elif model_name == "focalnet-base":
lowerCamelCase_ = torch.tensor([-0.1_655, -0.4_090, -0.1_730] )
elif model_name == "focalnet-base-lrf":
lowerCamelCase_ = torch.tensor([0.5_306, -0.0_483, -0.3_928] )
assert torch.allclose(outputs.logits[0, :3] ,lowerCAmelCase__ ,atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(lowerCAmelCase__ )
processor.save_pretrained(lowerCAmelCase__ )
if push_to_hub:
print(f"Pushing model and processor of {model_name} to the hub..." )
model.push_to_hub(f"{model_name}" )
processor.push_to_hub(f"{model_name}" )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""focalnet-tiny""",
type=str,
help="""Name of the FocalNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub.""",
)
A_ = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
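    # Example invocation (the script filename and the output path are placeholders):
    # python convert_focalnet_to_hf.py --model_name focalnet-tiny --pytorch_dump_folder_path ./focalnet-tiny --push_to_hub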
| 29 | 0 |
"""simple docstring"""
from __future__ import annotations
graph = {
"""A""": ["""B""", """C""", """E"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F""", """G"""],
"""D""": ["""B"""],
"""E""": ["""A""", """B""", """D"""],
"""F""": ["""C"""],
"""G""": ["""C"""],
}
class Graph:
    """Breadth-first search tree over a graph given as an adjacency list."""

    def __init__( self , graph , source_vertex ):
        """Store the adjacency list and the vertex the breadth first search starts from."""
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent = {}
        self.source_vertex = source_vertex

    def breath_first_search( self ):
        """Visit the graph level by level, recording each vertex's parent in the BFS tree."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0 )
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex )
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex )

    def shortest_path( self , target_vertex ):
        """Return the source->target path as an arrow-separated string, or raise ValueError."""
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex )
        if target_vertex_parent is None:
            msg = (
                f"""No path from vertex: {self.source_vertex} to vertex: {target_vertex}"""
            )
            raise ValueError(msg )
        return self.shortest_path(target_vertex_parent ) + f"""->{target_vertex}"""
if __name__ == "__main__":
    g = Graph(graph, "G")
g.breath_first_search()
print(g.shortest_path("""D"""))
print(g.shortest_path("""G"""))
print(g.shortest_path("""Foo"""))
| 453 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __lowerCamelCase ( lowerCAmelCase , unittest.TestCase ):
a__: Tuple = RoCBertTokenizer
a__: int = None
a__: Optional[Any] = False
a__: Optional[int] = True
a__: Tuple = filter_non_english
def UpperCAmelCase__ ( self ):
super().setUp()
lowerCamelCase_ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''你''', '''好''', '''是''', '''谁''', '''a''', '''b''', '''c''', '''d''']
lowerCamelCase_ = {}
lowerCamelCase_ = {}
for i, value in enumerate(UpperCAmelCase ):
lowerCamelCase_ = i
lowerCamelCase_ = i
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_shape_file'''] )
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_pronunciation_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
with open(self.word_shape_file , '''w''' , encoding='''utf-8''' ) as word_shape_writer:
json.dump(UpperCAmelCase , UpperCAmelCase , ensure_ascii=UpperCAmelCase )
with open(self.word_pronunciation_file , '''w''' , encoding='''utf-8''' ) as word_pronunciation_writer:
json.dump(UpperCAmelCase , UpperCAmelCase , ensure_ascii=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
lowerCamelCase_ = tokenizer.tokenize('''你好[SEP]你是谁''' )
self.assertListEqual(UpperCAmelCase , ['''你''', '''好''', '''[SEP]''', '''你''', '''是''', '''谁'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(UpperCAmelCase ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(UpperCAmelCase ) , [5, 6, 2, 5, 7, 8] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
lowerCamelCase_ = {}
for i, token in enumerate(UpperCAmelCase ):
lowerCamelCase_ = i
lowerCamelCase_ = RoCBertWordpieceTokenizer(vocab=UpperCAmelCase , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def UpperCAmelCase__ ( self ):
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def UpperCAmelCase__ ( self ):
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def UpperCAmelCase__ ( self ):
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(UpperCAmelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
if self.test_rust_tokenizer:
lowerCamelCase_ = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(UpperCAmelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
def UpperCAmelCase__ ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
lowerCamelCase_ = tokenizer_r.encode_plus(
UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , add_special_tokens=UpperCAmelCase , )
lowerCamelCase_ = tokenizer_r.do_lower_case if hasattr(UpperCAmelCase , '''do_lower_case''' ) else False
lowerCamelCase_ = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = ['''的''', '''人''', '''有''']
lowerCamelCase_ = ''''''.join(UpperCAmelCase )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCamelCase_ = True
lowerCamelCase_ = self.tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = tokenizer_p.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer_r.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer_r.convert_ids_to_tokens(UpperCAmelCase )
lowerCamelCase_ = tokenizer_p.convert_ids_to_tokens(UpperCAmelCase )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = False
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = self.tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = tokenizer_r.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer_p.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer_r.convert_ids_to_tokens(UpperCAmelCase )
lowerCamelCase_ = tokenizer_p.convert_ids_to_tokens(UpperCAmelCase )
# it is expected that only the first Chinese character is not preceded by "##".
lowerCamelCase_ = [
f"##{token}" if idx != 0 else token for idx, token in enumerate(UpperCAmelCase )
]
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
@slow
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
lowerCamelCase_ = tokenizer.encode('''你好''' , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer.encode('''你是谁''' , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase )
lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase , UpperCAmelCase )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_tokenizers(do_lower_case=UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
lowerCamelCase_ = '''你好,你是谁'''
lowerCamelCase_ = tokenizer.tokenize(UpperCAmelCase )
lowerCamelCase_ = tokenizer.convert_tokens_to_ids(UpperCAmelCase )
lowerCamelCase_ = tokenizer.convert_tokens_to_shape_ids(UpperCAmelCase )
lowerCamelCase_ = tokenizer.convert_tokens_to_pronunciation_ids(UpperCAmelCase )
lowerCamelCase_ = tokenizer.prepare_for_model(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer.encode_plus(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
| 29 | 0 |
from manim import *
class _A ( __lowercase ):
def UpperCAmelCase ( self ):
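        # Memory cells are drawn as small rectangles and grouped into labelled CPU, GPU and model blocks
        # before the checkpoint-loading animation runs.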
_UpperCAmelCase = Rectangle(height=0.5 , width=0.5 )
_UpperCAmelCase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_UpperCAmelCase = [mem.copy() for i in range(6 )]
_UpperCAmelCase = [mem.copy() for i in range(6 )]
_UpperCAmelCase = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
_UpperCAmelCase = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
_UpperCAmelCase = VGroup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
_UpperCAmelCase = Text("""CPU""" , font_size=24 )
_UpperCAmelCase = Group(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=_SCREAMING_SNAKE_CASE )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = [mem.copy() for i in range(4 )]
_UpperCAmelCase = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
_UpperCAmelCase = Text("""GPU""" , font_size=24 )
_UpperCAmelCase = Group(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=_SCREAMING_SNAKE_CASE )
gpu.move_to([-1, -1, 0] )
self.add(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = [mem.copy() for i in range(6 )]
_UpperCAmelCase = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
_UpperCAmelCase = Text("""Model""" , font_size=24 )
_UpperCAmelCase = Group(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=_SCREAMING_SNAKE_CASE )
model.move_to([3, -1.0, 0] )
self.add(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = []
for i, rect in enumerate(_SCREAMING_SNAKE_CASE ):
rect.set_stroke(_SCREAMING_SNAKE_CASE )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
_UpperCAmelCase = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(_SCREAMING_SNAKE_CASE , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_SCREAMING_SNAKE_CASE )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=_SCREAMING_SNAKE_CASE , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=_SCREAMING_SNAKE_CASE , buff=0.0 )
self.add(_SCREAMING_SNAKE_CASE )
cpu_targs.append(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = [mem.copy() for i in range(6 )]
_UpperCAmelCase = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
_UpperCAmelCase = Text("""Loaded Checkpoint""" , font_size=24 )
_UpperCAmelCase = Group(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , aligned_edge=_SCREAMING_SNAKE_CASE , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
_UpperCAmelCase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_UpperCAmelCase = MarkupText(
F"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = MarkupText(
F"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(_SCREAMING_SNAKE_CASE , DOWN * 2.4 , aligned_edge=key_text.get_left() )
_UpperCAmelCase = MarkupText(
F"Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(_SCREAMING_SNAKE_CASE ) , Write(_SCREAMING_SNAKE_CASE ) )
self.play(Write(_SCREAMING_SNAKE_CASE , run_time=1 ) , Create(_SCREAMING_SNAKE_CASE , run_time=1 ) )
_UpperCAmelCase = []
_UpperCAmelCase = []
for i, rect in enumerate(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = fill.copy().set_fill(_SCREAMING_SNAKE_CASE , opacity=0.7 )
target.move_to(_SCREAMING_SNAKE_CASE )
first_animations.append(GrowFromCenter(_SCREAMING_SNAKE_CASE , run_time=1 ) )
_UpperCAmelCase = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(_SCREAMING_SNAKE_CASE , run_time=1.5 ) )
self.play(*_SCREAMING_SNAKE_CASE )
self.play(*_SCREAMING_SNAKE_CASE )
self.wait() | 518 |
"""simple docstring"""
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
A_ = datasets.logging.get_logger(__name__)
A_ = """\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",
author = \"Moosavi, Nafise Sadat and
Strube, Michael\",
booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",
month = aug,
year = \"2016\",
address = \"Berlin, Germany\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/P16-1060\",
doi = \"10.18653/v1/P16-1060\",
pages = \"632--642\",
}
"""
A_ = """\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only works with the CoNLL line format:
The CoNLL format has one word per line, with all the annotations for this word in columns separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contains the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterisk with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identify the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More information on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
"""
A_ = """
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
        Only columns 4, 5, 6 and the last column are used (word, POS, parse and coreference annotation)
See the details on the format in the description of the metric.
    references: list of sentences. Each sentence is a list of word references to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
        Only columns 4, 5, 6 and the last column are used (word, POS, parse and coreference annotation)
See the details on the format in the description of the metric.
    keep_singletons: After extracting all mentions of the key or system files,
        mentions whose corresponding coreference chain is of size one
        are considered singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting 'keep_singletons=False', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
'mentions': mentions
'muc': MUC metric [Vilain et al, 1995]
'bcub': B-cubed [Bagga and Baldwin, 1998]
'ceafe': CEAFe [Luo et al., 2005]
'lea': LEA [Moosavi and Strube, 2016]
'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric('coval')
>>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',
... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',
... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',
... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',
... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',
... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{'mentions/recall': 1.0,[...] 'conll_score': 100.0}
"""
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=False ,lowerCAmelCase__=False ,lowerCAmelCase__=True ,lowerCAmelCase__=False ,lowerCAmelCase__="dummy_doc" ):
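    # Parses the key and system CoNLL lines for a single document, extracts mentions and clusters
    # (optionally NP-only / minimum spans, optionally dropping nested mentions) and returns the
    # per-document coreference info consumed by the evaluator.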
lowerCamelCase_ = {doc: key_lines}
lowerCamelCase_ = {doc: sys_lines}
lowerCamelCase_ = {}
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ , lowerCamelCase_ = reader.get_doc_mentions(lowerCAmelCase__ ,key_doc_lines[doc] ,lowerCAmelCase__ )
key_singletons_num += singletons_num
if NP_only or min_span:
lowerCamelCase_ = reader.set_annotated_parse_trees(lowerCAmelCase__ ,key_doc_lines[doc] ,lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCamelCase_ , lowerCamelCase_ = reader.get_doc_mentions(lowerCAmelCase__ ,sys_doc_lines[doc] ,lowerCAmelCase__ )
sys_singletons_num += singletons_num
if NP_only or min_span:
lowerCamelCase_ = reader.set_annotated_parse_trees(lowerCAmelCase__ ,key_doc_lines[doc] ,lowerCAmelCase__ ,lowerCAmelCase__ )
if remove_nested:
lowerCamelCase_ , lowerCamelCase_ = reader.remove_nested_coref_mentions(lowerCAmelCase__ ,lowerCAmelCase__ )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
lowerCamelCase_ , lowerCamelCase_ = reader.remove_nested_coref_mentions(lowerCAmelCase__ ,lowerCAmelCase__ )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
lowerCamelCase_ = reader.get_mention_assignments(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCamelCase_ = reader.get_mention_assignments(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCamelCase_ = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
'''Number of removed nested coreferring mentions in the key '''
f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}" )
logger.info(
'''Number of resulting singleton clusters in the key '''
f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}" )
if not keep_singletons:
logger.info(
f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
'''files, respectively''' )
return doc_coref_infos
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
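    # Runs each requested metric over the documents and logs recall/precision/F1;
    # the MUC, B-cubed and CEAFe F1 values are additionally averaged into the CoNLL score.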
lowerCamelCase_ = get_coref_infos(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCamelCase_ = {}
lowerCamelCase_ = 0
lowerCamelCase_ = 0
for name, metric in metrics:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = evaluator.evaluate_documents(lowerCAmelCase__ ,lowerCAmelCase__ ,beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": fa} )
logger.info(
name.ljust(10 ) ,f"Recall: {recall * 100:.2f}" ,f" Precision: {precision * 100:.2f}" ,f" F1: {fa * 100:.2f}" ,)
if conll_subparts_num == 3:
lowerCamelCase_ = (conll / 3) * 100
logger.info(f"CoNLL score: {conll:.2f}" )
output_scores.update({'''conll_score''': conll} )
return output_scores
def lowercase ( lowerCAmelCase__ ):
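    # A gold parse is considered present when the parse column (6th field) of the first
    # non-comment data line is something other than "-".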
lowerCamelCase_ = False
for line in key_lines:
if not line.startswith('''#''' ):
if len(line.split() ) > 6:
lowerCamelCase_ = line.split()[5]
if not parse_col == "-":
lowerCamelCase_ = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase ( datasets.Metric ):
def UpperCAmelCase__ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' ) ),
'''references''': datasets.Sequence(datasets.Value('''string''' ) ),
} ) , codebase_urls=['''https://github.com/ns-moosavi/coval'''] , reference_urls=[
'''https://github.com/ns-moosavi/coval''',
'''https://www.aclweb.org/anthology/P16-1060''',
'''http://www.conll.cemantix.org/2012/data.html''',
] , )
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=False ):
lowerCamelCase_ = [
('''mentions''', evaluator.mentions),
('''muc''', evaluator.muc),
('''bcub''', evaluator.b_cubed),
('''ceafe''', evaluator.ceafe),
('''lea''', evaluator.lea),
]
if min_span:
lowerCamelCase_ = util.check_gold_parse_annotation(UpperCAmelCase )
if not has_gold_parse:
raise NotImplementedError('''References should have gold parse annotation to use \'min_span\'.''' )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
lowerCamelCase_ = evaluate(
key_lines=UpperCAmelCase , sys_lines=UpperCAmelCase , metrics=UpperCAmelCase , NP_only=UpperCAmelCase , remove_nested=UpperCAmelCase , keep_singletons=UpperCAmelCase , min_span=UpperCAmelCase , )
return score
| 29 | 0 |
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
_SCREAMING_SNAKE_CASE : Any = re.compile(R'\s+')
def __lowerCAmelCase ( __magic_name__ ):
return {"hash": hashlib.mda(re.sub(lowerCAmelCase__ , "" , example["content"] ).encode("utf-8" ) ).hexdigest()}
def __lowerCAmelCase ( __magic_name__ ):
_lowercase: str = [len(lowerCAmelCase__ ) for line in example["content"].splitlines()]
return {"line_mean": np.mean(lowerCAmelCase__ ), "line_max": max(lowerCAmelCase__ )}
def __lowerCAmelCase ( __magic_name__ ):
_lowercase: Union[str, Any] = np.mean([c.isalnum() for c in example["content"]] )
return {"alpha_frac": alpha_frac}
def __lowerCAmelCase ( __magic_name__ , __magic_name__ ):
if example["hash"] in uniques:
uniques.remove(example["hash"] )
return True
else:
return False
def __lowerCAmelCase ( __magic_name__ , __magic_name__=5 ):
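    # Heuristic: the file is flagged as auto-generated if any of the keywords occurs in its first few lines.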
_lowercase: Optional[Any] = ["auto-generated", "autogenerated", "automatically generated"]
_lowercase: int = example["content"].splitlines()
for _, line in zip(range(lowerCAmelCase__ ) , lowerCAmelCase__ ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def __lowerCAmelCase ( __magic_name__ , __magic_name__=5 , __magic_name__=0.05 ):
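    # Two heuristics: a keyword match in the first lines, then counts of "config"/"test" occurrences
    # compared against a threshold proportional to the number of lines in the file.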
_lowercase: Any = ["unit tests", "test file", "configuration file"]
_lowercase: List[Any] = example["content"].splitlines()
_lowercase: int = 0
_lowercase: List[Any] = 0
# first test
for _, line in zip(range(lowerCAmelCase__ ) , lowerCAmelCase__ ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
_lowercase: Tuple = example["content"].count("\n" )
_lowercase: List[Any] = int(coeff * nlines )
for line in lines:
count_config += line.lower().count("config" )
count_test += line.lower().count("test" )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def __lowerCAmelCase ( __magic_name__ ):
_lowercase: Optional[Any] = ["def ", "class ", "for ", "while "]
_lowercase: Any = example["content"].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def __lowerCAmelCase ( __magic_name__ , __magic_name__=4 ):
_lowercase: Union[str, Any] = example["content"].splitlines()
_lowercase: int = 0
for line in lines:
counter += line.lower().count("=" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def __lowerCAmelCase ( __magic_name__ ):
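    # Characters per token after tokenization; samples with a very low ratio are later filtered out.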
_lowercase: List[str] = tokenizer(example["content"] , truncation=lowerCAmelCase__ )["input_ids"]
_lowercase: int = len(example["content"] ) / len(lowerCAmelCase__ )
return {"ratio": ratio}
def __lowerCAmelCase ( __magic_name__ ):
_lowercase: List[str] = {}
results.update(get_hash(lowerCAmelCase__ ) )
results.update(line_stats(lowerCAmelCase__ ) )
results.update(alpha_stats(lowerCAmelCase__ ) )
results.update(char_token_ratio(lowerCAmelCase__ ) )
results.update(is_autogenerated(lowerCAmelCase__ ) )
results.update(is_config_or_test(lowerCAmelCase__ ) )
results.update(has_no_keywords(lowerCAmelCase__ ) )
results.update(has_few_assignments(lowerCAmelCase__ ) )
return results
def __lowerCAmelCase ( __magic_name__ , __magic_name__ , __magic_name__ ):
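    # A sample is kept only if it survives exact-hash deduplication and the quality heuristics above
    # (some of them applied probabilistically).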
if not check_uniques(lowerCAmelCase__ , lowerCAmelCase__ ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def __lowerCAmelCase ( __magic_name__ ):
with open(lowerCAmelCase__ , "rb" ) as f_in:
with gzip.open(str(lowerCAmelCase__ ) + ".gz" , "wb" , compresslevel=6 ) as f_out:
shutil.copyfileobj(lowerCAmelCase__ , lowerCAmelCase__ )
os.unlink(lowerCAmelCase__ )
# Settings
_SCREAMING_SNAKE_CASE : Any = HfArgumentParser(PreprocessingArguments)
_SCREAMING_SNAKE_CASE : Dict = parser.parse_args()
if args.num_workers is None:
_SCREAMING_SNAKE_CASE : str = multiprocessing.cpu_count()
_SCREAMING_SNAKE_CASE : Any = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
_SCREAMING_SNAKE_CASE : Tuple = time.time()
_SCREAMING_SNAKE_CASE : str = load_dataset(args.dataset_name, split='train')
print(f'''Time to load dataset: {time.time()-t_start:.2f}''')
# Run preprocessing
_SCREAMING_SNAKE_CASE : Union[str, Any] = time.time()
_SCREAMING_SNAKE_CASE : Union[str, Any] = ds.map(preprocess, num_proc=args.num_workers)
print(f'''Time to preprocess dataset: {time.time()-t_start:.2f}''')
# Deduplicate hashes
_SCREAMING_SNAKE_CASE : Tuple = set(ds.unique('hash'))
_SCREAMING_SNAKE_CASE : str = len(uniques) / len(ds)
print(f'''Fraction of duplicates: {1-frac:.2%}''')
# Deduplicate data and apply heuristics
_SCREAMING_SNAKE_CASE : Union[str, Any] = time.time()
_SCREAMING_SNAKE_CASE : List[Any] = ds.filter(filter, fn_kwargs={'uniques': uniques, 'args': args})
print(f'''Time to filter dataset: {time.time()-t_start:.2f}''')
print(f'''Size of filtered dataset: {len(ds_filter)}''')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
_SCREAMING_SNAKE_CASE : Union[str, Any] = time.time()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Any = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(f'''Time to deduplicate dataset: {time.time()-t_start:.2f}''')
print(f'''Size of deduplicate dataset: {len(ds_filter)}''')
# Save data in batches of samples_per_file
_SCREAMING_SNAKE_CASE : Dict = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
with open(output_dir / 'duplicate_clusters.json', 'w') as f:
json.dump(duplicate_clusters, f)
_SCREAMING_SNAKE_CASE : Tuple = output_dir / 'data'
data_dir.mkdir(exist_ok=True)
_SCREAMING_SNAKE_CASE : Tuple = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
_SCREAMING_SNAKE_CASE : List[Any] = str(data_dir / f'''file-{file_number+1:012}.json''')
_SCREAMING_SNAKE_CASE : Optional[int] = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(f'''Time to save dataset: {time.time()-t_start:.2f}''')
| 226 |
"""simple docstring"""
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class __lowerCamelCase ( unittest.TestCase ):
@parameterized.expand([(None,), ('''foo.json''',)] )
def UpperCAmelCase__ ( self , UpperCAmelCase ):
lowerCamelCase_ = GenerationConfig(
do_sample=UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCAmelCase , config_name=UpperCAmelCase )
lowerCamelCase_ = GenerationConfig.from_pretrained(UpperCAmelCase , config_name=UpperCAmelCase )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , UpperCAmelCase )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = AutoConfig.from_pretrained('''gpt2''' )
lowerCamelCase_ = GenerationConfig.from_model_config(UpperCAmelCase )
lowerCamelCase_ = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(UpperCAmelCase , UpperCAmelCase )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig()
lowerCamelCase_ = {
'''max_new_tokens''': 1024,
'''foo''': '''bar''',
}
lowerCamelCase_ = copy.deepcopy(UpperCAmelCase )
lowerCamelCase_ = generation_config.update(**UpperCAmelCase )
# update_kwargs was not modified (no side effects)
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(UpperCAmelCase , {'''foo''': '''bar'''} )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig()
lowerCamelCase_ = '''bar'''
with tempfile.TemporaryDirectory('''test-generation-config''' ) as tmp_dir:
generation_config.save_pretrained(UpperCAmelCase )
lowerCamelCase_ = GenerationConfig.from_pretrained(UpperCAmelCase )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , '''bar''' )
lowerCamelCase_ = GenerationConfig.from_model_config(UpperCAmelCase )
assert not hasattr(UpperCAmelCase , '''foo''' ) # no new kwargs should be initialized if from config
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , UpperCAmelCase )
self.assertEqual(default_config.num_beams , 1 )
lowerCamelCase_ = GenerationConfig(
do_sample=UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , UpperCAmelCase )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCAmelCase )
lowerCamelCase_ = GenerationConfig.from_pretrained(UpperCAmelCase , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , UpperCAmelCase )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class __lowerCamelCase ( unittest.TestCase ):
@classmethod
def UpperCAmelCase__ ( cls ):
lowerCamelCase_ = TOKEN
HfFolder.save_token(UpperCAmelCase )
@classmethod
def UpperCAmelCase__ ( cls ):
try:
delete_repo(token=cls._token , repo_id='''test-generation-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-generation-config-org''' )
except HTTPError:
pass
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig(
do_sample=UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''test-generation-config''' , use_auth_token=self._token )
lowerCamelCase_ = GenerationConfig.from_pretrained(f"{USER}/test-generation-config" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-generation-config''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
UpperCAmelCase , repo_id='''test-generation-config''' , push_to_hub=UpperCAmelCase , use_auth_token=self._token )
lowerCamelCase_ = GenerationConfig.from_pretrained(f"{USER}/test-generation-config" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig(
do_sample=UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''valid_org/test-generation-config-org''' , use_auth_token=self._token )
lowerCamelCase_ = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-generation-config-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
UpperCAmelCase , repo_id='''valid_org/test-generation-config-org''' , push_to_hub=UpperCAmelCase , use_auth_token=self._token )
lowerCamelCase_ = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
| 29 | 0 |
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
def __snake_case ( self : Any ):
lowerCAmelCase__ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCAmelCase__ = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = -1
lowerCAmelCase__ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = model.generate(SCREAMING_SNAKE_CASE_ , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
lowerCAmelCase__ = TextStreamer(SCREAMING_SNAKE_CASE_ )
model.generate(SCREAMING_SNAKE_CASE_ , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE_ , streamer=SCREAMING_SNAKE_CASE_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowerCAmelCase__ = cs.out[:-1]
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : Union[str, Any] ):
lowerCAmelCase__ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCAmelCase__ = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = -1
lowerCAmelCase__ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = model.generate(SCREAMING_SNAKE_CASE_ , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = tokenizer.decode(greedy_ids[0] )
lowerCAmelCase__ = TextIteratorStreamer(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
lowerCAmelCase__ = Thread(target=model.generate , kwargs=SCREAMING_SNAKE_CASE_ )
thread.start()
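        # generate() runs in the background thread; iterating over the streamer yields decoded text chunks as they are produced.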
lowerCAmelCase__ = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : Dict ):
lowerCAmelCase__ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCAmelCase__ = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = -1
lowerCAmelCase__ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = model.generate(SCREAMING_SNAKE_CASE_ , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = greedy_ids[:, input_ids.shape[1] :]
lowerCAmelCase__ = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
lowerCAmelCase__ = TextStreamer(SCREAMING_SNAKE_CASE_ , skip_prompt=SCREAMING_SNAKE_CASE_ )
model.generate(SCREAMING_SNAKE_CASE_ , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE_ , streamer=SCREAMING_SNAKE_CASE_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowerCAmelCase__ = cs.out[:-1]
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : List[str] ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
lowerCAmelCase__ = AutoTokenizer.from_pretrained('''distilgpt2''' )
lowerCAmelCase__ = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = -1
lowerCAmelCase__ = torch.ones((1, 5) , device=SCREAMING_SNAKE_CASE_ ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
lowerCAmelCase__ = TextStreamer(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
model.generate(SCREAMING_SNAKE_CASE_ , max_new_tokens=1 , do_sample=SCREAMING_SNAKE_CASE_ , streamer=SCREAMING_SNAKE_CASE_ )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
lowerCAmelCase__ = cs.out[:-1] # Remove the final "\n"
lowerCAmelCase__ = tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def __snake_case ( self : List[str] ):
lowerCAmelCase__ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCAmelCase__ = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = -1
lowerCAmelCase__ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = TextIteratorStreamer(SCREAMING_SNAKE_CASE_ , timeout=0.001 )
lowerCAmelCase__ = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
lowerCAmelCase__ = Thread(target=model.generate , kwargs=SCREAMING_SNAKE_CASE_ )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase__ = ''''''
for new_text in streamer:
streamer_text += new_text
| 668 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __lowerCamelCase :
a__: List[str]
a__: Optional[str] = None
# Automatically constructed
a__: ClassVar[str] = "dict"
a__: ClassVar[Any] = None
a__: str = field(default='Translation' , init=lowerCAmelCase , repr=lowerCAmelCase )
def __call__( self ):
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def UpperCAmelCase__ ( self ):
from .features import Value
return {k: Value('''string''' ) for k in sorted(self.languages )}
@dataclass
class __lowerCamelCase :
a__: Optional[List] = None
a__: Optional[int] = None
a__: Optional[str] = None
# Automatically constructed
a__: ClassVar[str] = "dict"
a__: ClassVar[Any] = None
a__: str = field(default='TranslationVariableLanguages' , init=lowerCAmelCase , repr=lowerCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = sorted(set(self.languages ) ) if self.languages else None
lowerCamelCase_ = len(self.languages ) if self.languages else None
def __call__( self ):
return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} )
def UpperCAmelCase__ ( self , UpperCAmelCase ):
lowerCamelCase_ = set(self.languages )
if self.languages and set(UpperCAmelCase ) - lang_set:
raise ValueError(
f"Some languages in example ({', '.join(sorted(set(UpperCAmelCase ) - lang_set ) )}) are not in valid set ({', '.join(UpperCAmelCase )})." )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
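        # e.g. {"en": "the cat", "fr": ["le chat", "un chat"]} becomes
        # [("en", "the cat"), ("fr", "le chat"), ("fr", "un chat")] before being re-zipped below.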
lowerCamelCase_ = []
for lang, text in translation_dict.items():
if isinstance(UpperCAmelCase , UpperCAmelCase ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
lowerCamelCase_ , lowerCamelCase_ = zip(*sorted(UpperCAmelCase ) )
return {"language": languages, "translation": translations}
def UpperCAmelCase__ ( self ):
from .features import Sequence, Value
return {
"language": Sequence(Value('''string''' ) ),
"translation": Sequence(Value('''string''' ) ),
}
| 29 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
a : Union[str, Any] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class a ( _lowerCamelCase ):
snake_case_ = ['pixel_values']
def __init__( self : str , lowercase_ : Optional[int] = True , lowercase_ : str = None , lowercase_ : int = PILImageResampling.BICUBIC , lowercase_ : List[Any] = True , lowercase_ : List[Any] = None , lowercase_ : Optional[Any] = True , lowercase_ : str = 1 / 255 , lowercase_ : Any = True , lowercase_ : int = None , lowercase_ : Any = None , lowercase_ : Tuple = True , **lowercase_ : int , ):
super().__init__(**lowercase_ )
snake_case_ = size if size is not None else {'''shortest_edge''': 224}
snake_case_ = get_size_dict(lowercase_ , default_to_square=lowercase_ )
snake_case_ = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
snake_case_ = get_size_dict(lowercase_ , default_to_square=lowercase_ , param_name='''crop_size''' )
snake_case_ = do_resize
snake_case_ = size
snake_case_ = resample
snake_case_ = do_center_crop
snake_case_ = crop_size
snake_case_ = do_rescale
snake_case_ = rescale_factor
snake_case_ = do_normalize
snake_case_ = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
snake_case_ = image_std if image_std is not None else OPENAI_CLIP_STD
snake_case_ = do_convert_rgb
def A_ ( self : Tuple , lowercase_ : Any , lowercase_ : Tuple , lowercase_ : Optional[Any] = PILImageResampling.BICUBIC , lowercase_ : List[Any] = None , **lowercase_ : List[str] , ):
snake_case_ = get_size_dict(lowercase_ , default_to_square=lowercase_ )
if "shortest_edge" not in size:
raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
snake_case_ = get_resize_output_image_size(lowercase_ , size=size['''shortest_edge'''] , default_to_square=lowercase_ )
return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_ )
def A_ ( self : Dict , lowercase_ : int , lowercase_ : Optional[Any] , lowercase_ : Tuple = None , **lowercase_ : int , ):
snake_case_ = get_size_dict(lowercase_ )
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
return center_crop(lowercase_ , size=(size['''height'''], size['''width''']) , data_format=lowercase_ , **lowercase_ )
def A_ ( self : Any , lowercase_ : Optional[Any] , lowercase_ : Tuple , lowercase_ : str = None , **lowercase_ : Dict , ):
return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_ )
def A_ ( self : Optional[Any] , lowercase_ : int , lowercase_ : Dict , lowercase_ : Optional[Any] , lowercase_ : int = None , **lowercase_ : Dict , ):
return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_ )
def A_ ( self : List[Any] , lowercase_ : int , lowercase_ : str = None , lowercase_ : Dict = None , lowercase_ : Union[str, Any] = None , lowercase_ : Optional[int] = None , lowercase_ : Union[str, Any] = None , lowercase_ : Tuple = None , lowercase_ : Any = None , lowercase_ : List[str] = None , lowercase_ : Union[str, Any] = None , lowercase_ : Tuple = None , lowercase_ : List[Any] = None , lowercase_ : Any = None , lowercase_ : Tuple = ChannelDimension.FIRST , **lowercase_ : str , ):
snake_case_ = do_resize if do_resize is not None else self.do_resize
snake_case_ = size if size is not None else self.size
snake_case_ = get_size_dict(lowercase_ , param_name='''size''' , default_to_square=lowercase_ )
snake_case_ = resample if resample is not None else self.resample
snake_case_ = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case_ = crop_size if crop_size is not None else self.crop_size
snake_case_ = get_size_dict(lowercase_ , param_name='''crop_size''' , default_to_square=lowercase_ )
snake_case_ = do_rescale if do_rescale is not None else self.do_rescale
snake_case_ = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case_ = do_normalize if do_normalize is not None else self.do_normalize
snake_case_ = image_mean if image_mean is not None else self.image_mean
snake_case_ = image_std if image_std is not None else self.image_std
snake_case_ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
snake_case_ = make_list_of_images(lowercase_ )
if not valid_images(lowercase_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
snake_case_ = [convert_to_rgb(lowercase_ ) for image in images]
# All transformations expect numpy arrays.
snake_case_ = [to_numpy_array(lowercase_ ) for image in images]
if do_resize:
snake_case_ = [self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ ) for image in images]
if do_center_crop:
snake_case_ = [self.center_crop(image=lowercase_ , size=lowercase_ ) for image in images]
if do_rescale:
snake_case_ = [self.rescale(image=lowercase_ , scale=lowercase_ ) for image in images]
if do_normalize:
snake_case_ = [self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_ ) for image in images]
snake_case_ = [to_channel_dimension_format(lowercase_ , lowercase_ ) for image in images]
snake_case_ = {'''pixel_values''': images}
return BatchFeature(data=lowercase_ , tensor_type=lowercase_ )
| 640 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
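# Torch- and Flax-specific symbols are registered below and only imported lazily when first accessed.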
A_ = {
"""configuration_gpt_neo""": ["""GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoConfig""", """GPTNeoOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"""GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoForCausalLM""",
"""GPTNeoForQuestionAnswering""",
"""GPTNeoForSequenceClassification""",
"""GPTNeoForTokenClassification""",
"""GPTNeoModel""",
"""GPTNeoPreTrainedModel""",
"""load_tf_weights_in_gpt_neo""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"""FlaxGPTNeoForCausalLM""",
"""FlaxGPTNeoModel""",
"""FlaxGPTNeoPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
A_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 29 | 0 |
'''simple docstring'''
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class _UpperCAmelCase ( enum.Enum ):
"""simple docstring"""
snake_case = 0
snake_case = 1
snake_case = 2
@add_end_docstrings(snake_case_ )
class _UpperCAmelCase ( snake_case_ ):
"""simple docstring"""
snake_case = '\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n '
def __init__( self : str , *__UpperCAmelCase : List[Any] , **__UpperCAmelCase : Union[str, Any] ):
'''simple docstring'''
super().__init__(*__UpperCAmelCase , **__UpperCAmelCase )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
_A = None
if self.model.config.prefix is not None:
_A = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
_A = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
_A , _A , _A = self._sanitize_parameters(prefix=__UpperCAmelCase , **self._forward_params )
_A = {**self._preprocess_params, **preprocess_params}
_A = {**self._forward_params, **forward_params}
def lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : str=None , __UpperCAmelCase : List[Any]=None , __UpperCAmelCase : str=None , __UpperCAmelCase : List[str]=None , __UpperCAmelCase : Any=None , __UpperCAmelCase : int=None , __UpperCAmelCase : str=None , __UpperCAmelCase : int=None , **__UpperCAmelCase : Union[str, Any] , ):
'''simple docstring'''
_A = {}
if prefix is not None:
_A = prefix
if prefix:
_A = self.tokenizer(
__UpperCAmelCase , padding=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_tensors=self.framework )
_A = prefix_inputs["input_ids"].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'''
" [None, \'hole\']" )
_A = handle_long_generation
preprocess_params.update(__UpperCAmelCase )
_A = generate_kwargs
_A = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_full_text`" )
if return_tensors is not None:
raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`" )
_A = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_tensors`" )
_A = ReturnType.TENSORS
if return_type is not None:
_A = return_type
if clean_up_tokenization_spaces is not None:
_A = clean_up_tokenization_spaces
if stop_sequence is not None:
_A = self.tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
if len(__UpperCAmelCase ) > 1:
warnings.warn(
"Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
" the stop sequence will be used as the stop sequence string in the interim." )
_A = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def lowerCAmelCase ( self : List[Any] , *__UpperCAmelCase : Dict , **__UpperCAmelCase : int ):
'''simple docstring'''
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({"add_space_before_punct_symbol": True} )
return super()._parse_and_tokenize(*__UpperCAmelCase , **__UpperCAmelCase )
def __call__( self : Optional[int] , __UpperCAmelCase : Any , **__UpperCAmelCase : str ):
'''simple docstring'''
return super().__call__(__UpperCAmelCase , **__UpperCAmelCase )
def lowerCAmelCase ( self : int , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Optional[int]="" , __UpperCAmelCase : List[Any]=None , **__UpperCAmelCase : int ):
'''simple docstring'''
_A = self.tokenizer(
prefix + prompt_text , padding=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_tensors=self.framework )
_A = prompt_text
if handle_long_generation == "hole":
_A = inputs["input_ids"].shape[-1]
if "max_new_tokens" in generate_kwargs:
_A = generate_kwargs["max_new_tokens"]
else:
_A = generate_kwargs.get("max_length" , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError("We cannot infer how many new tokens are expected" )
if cur_len + new_tokens > self.tokenizer.model_max_length:
_A = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
"We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
" models max length" )
_A = inputs["input_ids"][:, -keep_length:]
if "attention_mask" in inputs:
_A = inputs["attention_mask"][:, -keep_length:]
return inputs
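        # Note: the "hole" branch above keeps only the most recent `keep_length` prompt tokens
        # (and the matching attention-mask slice) so that prompt length + requested new tokens
        # still fits within tokenizer.model_max_length; the dropped prefix is simply discarded.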
def lowerCAmelCase ( self : Dict , __UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : Union[str, Any] ):
'''simple docstring'''
_A = model_inputs["input_ids"]
_A = model_inputs.get("attention_mask" , __UpperCAmelCase )
# Allow empty prompts
if input_ids.shape[1] == 0:
_A = None
_A = None
_A = 1
else:
_A = input_ids.shape[0]
_A = model_inputs.pop("prompt_text" )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
_A = generate_kwargs.pop("prefix_length" , 0 )
if prefix_length > 0:
_A = "max_new_tokens" in generate_kwargs or (
"generation_config" in generate_kwargs
and generate_kwargs["generation_config"].max_new_tokens is not None
)
if not has_max_new_tokens:
_A = generate_kwargs.get("max_length" ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
_A = "min_new_tokens" in generate_kwargs or (
"generation_config" in generate_kwargs
and generate_kwargs["generation_config"].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
_A = self.model.generate(input_ids=__UpperCAmelCase , attention_mask=__UpperCAmelCase , **__UpperCAmelCase )
_A = generated_sequence.shape[0]
if self.framework == "pt":
_A = generated_sequence.reshape(__UpperCAmelCase , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
_A = tf.reshape(__UpperCAmelCase , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
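        # out_b = in_b * num_return_sequences, so the reshape above groups the generated
        # sequences per input prompt before they are handed on to postprocessing.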
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[int]=ReturnType.FULL_TEXT , __UpperCAmelCase : int=True ):
'''simple docstring'''
_A = model_outputs["generated_sequence"][0]
_A = model_outputs["input_ids"]
_A = model_outputs["prompt_text"]
_A = generated_sequence.numpy().tolist()
_A = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
_A = {"generated_token_ids": sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
_A = self.tokenizer.decode(
__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase , clean_up_tokenization_spaces=__UpperCAmelCase , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
_A = 0
else:
_A = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=__UpperCAmelCase , clean_up_tokenization_spaces=__UpperCAmelCase , ) )
if return_type == ReturnType.FULL_TEXT:
_A = prompt_text + text[prompt_length:]
else:
_A = text[prompt_length:]
_A = {"generated_text": all_text}
records.append(__UpperCAmelCase )
return records
| 330 |
"""simple docstring"""
import math
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = [True] * n
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = True
for i in range(3 ,int(n**0.5 + 1 ) ,2 ):
lowerCamelCase_ = i * 2
while index < n:
lowerCamelCase_ = False
lowerCamelCase_ = index + i
lowerCamelCase_ = [2]
for i in range(3 ,lowerCAmelCase__ ,2 ):
if is_prime[i]:
primes.append(lowerCAmelCase__ )
return primes
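# The search below sums, pair by consecutive primes (p, q), the numbers in [p**2, q**2]
# that do not exceed the limit and are divisible by exactly one of p and q: multiples of
# p are added, multiples of q are added, and multiples of both are subtracted twice.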
def lowercase ( lowerCAmelCase__ = 999_966_663_333 ):
lowerCamelCase_ = math.floor(math.sqrt(lowerCAmelCase__ ) ) + 100
lowerCamelCase_ = prime_sieve(lowerCAmelCase__ )
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = primes[prime_index]
while (last_prime**2) <= limit:
lowerCamelCase_ = primes[prime_index + 1]
lowerCamelCase_ = last_prime**2
lowerCamelCase_ = next_prime**2
# Get numbers divisible by lps(current)
lowerCamelCase_ = lower_bound + last_prime
while upper_bound > current <= limit:
matches_sum += current
current += last_prime
# Reset the upper_bound
while (upper_bound - next_prime) > limit:
upper_bound -= next_prime
# Add the numbers divisible by ups(current)
lowerCamelCase_ = upper_bound - next_prime
while current > lower_bound:
matches_sum += current
current -= next_prime
# Remove the numbers divisible by both ups and lps
lowerCamelCase_ = 0
while upper_bound > current <= limit:
if current <= lower_bound:
# Increment the current number
current += last_prime * next_prime
continue
if current > limit:
break
# Remove twice since it was added by both ups and lps
matches_sum -= current * 2
# Increment the current number
current += last_prime * next_prime
# Setup for next pair
lowerCamelCase_ = next_prime
prime_index += 1
return matches_sum
if __name__ == "__main__":
print(solution())
| 29 | 0 |
from __future__ import annotations
def _lowerCAmelCase ( UpperCamelCase__: Tuple , UpperCamelCase__: str , UpperCamelCase__: int ) -> Union[str, Any]:
"""simple docstring"""
if days_between_payments <= 0:
raise ValueError("""days_between_payments must be > 0""" )
if daily_interest_rate < 0:
raise ValueError("""daily_interest_rate must be >= 0""" )
if principal <= 0:
raise ValueError("""principal must be > 0""" )
return principal * daily_interest_rate * days_between_payments
def _lowerCAmelCase ( UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Dict , UpperCamelCase__: int , ) -> Optional[int]:
"""simple docstring"""
if number_of_compounding_periods <= 0:
raise ValueError("""number_of_compounding_periods must be > 0""" )
if nominal_annual_interest_rate_percentage < 0:
raise ValueError("""nominal_annual_interest_rate_percentage must be >= 0""" )
if principal <= 0:
raise ValueError("""principal must be > 0""" )
return principal * (
(1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
- 1
)
def _lowerCAmelCase ( UpperCamelCase__: Optional[Any] , UpperCamelCase__: List[str] , UpperCamelCase__: int , ) -> str:
"""simple docstring"""
if number_of_years <= 0:
raise ValueError("""number_of_years must be > 0""" )
if nominal_annual_percentage_rate < 0:
raise ValueError("""nominal_annual_percentage_rate must be >= 0""" )
if principal <= 0:
raise ValueError("""principal must be > 0""" )
return compound_interest(
lowerCAmelCase__ , nominal_annual_percentage_rate / 3_65 , number_of_years * 3_65 )
if __name__ == "__main__":
import doctest
doctest.testmod()
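# Worked example (illustrative figures, not from the snippet above): the compound-interest
# helper (referred to as `compound_interest` at its call site) returns the interest earned,
# e.g. a principal of 10_000 at a 5% nominal annual rate compounded daily for two years:
#   compound_interest(10_000, 0.05 / 365, 2 * 365)  # about 1_051.6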
| 641 |
"""simple docstring"""
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
A_ = namedtuple(
"""_TestCommandArgs""",
[
"""dataset""",
"""name""",
"""cache_dir""",
"""data_dir""",
"""all_configs""",
"""save_infos""",
"""ignore_verifications""",
"""force_redownload""",
"""clear_cache""",
],
defaults=[None, None, None, False, False, False, False, False],
)
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
return (abs(source - target ) / target) < 0.01
@pytest.mark.integration
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = _TestCommandArgs(dataset=lowerCAmelCase__ ,all_configs=lowerCAmelCase__ ,save_infos=lowerCAmelCase__ )
lowerCamelCase_ = TestCommand(*lowerCAmelCase__ )
test_command.run()
lowerCamelCase_ = os.path.join(lowerCAmelCase__ ,'''README.md''' )
assert os.path.exists(lowerCAmelCase__ )
lowerCamelCase_ = DatasetInfosDict.from_directory(lowerCAmelCase__ )
lowerCamelCase_ = DatasetInfosDict(
{
'''default''': DatasetInfo(
features=Features(
{
'''tokens''': Sequence(Value('''string''' ) ),
'''ner_tags''': Sequence(
ClassLabel(names=['''O''', '''B-PER''', '''I-PER''', '''B-ORG''', '''I-ORG''', '''B-LOC''', '''I-LOC'''] ) ),
'''langs''': Sequence(Value('''string''' ) ),
'''spans''': Sequence(Value('''string''' ) ),
} ) ,splits=[
{
'''name''': '''train''',
'''num_bytes''': 2_351_563,
'''num_examples''': 10_000,
},
{
'''name''': '''validation''',
'''num_bytes''': 238_418,
'''num_examples''': 1_000,
},
] ,download_size=3_940_680 ,dataset_size=2_589_981 ,)
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
lowerCamelCase_ , lowerCamelCase_ = getattr(dataset_infos['''default'''] ,lowerCAmelCase__ ), getattr(expected_dataset_infos['''default'''] ,lowerCAmelCase__ )
if key == "num_bytes":
assert is_apercent_close(lowerCAmelCase__ ,lowerCAmelCase__ )
elif key == "splits":
assert list(lowerCAmelCase__ ) == list(lowerCAmelCase__ )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes ,expected[split].num_bytes )
else:
result == expected
| 29 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _snake_case ( __snake_case , __snake_case , __snake_case , unittest.TestCase ):
"""simple docstring"""
a = AltDiffusionPipeline
a = TEXT_TO_IMAGE_PARAMS
a = TEXT_TO_IMAGE_BATCH_PARAMS
a = TEXT_TO_IMAGE_IMAGE_PARAMS
a = TEXT_TO_IMAGE_IMAGE_PARAMS
def _lowerCAmelCase ( self : Dict):
"""simple docstring"""
torch.manual_seed(0)
_SCREAMING_SNAKE_CASE : Tuple = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=3_2 , )
_SCREAMING_SNAKE_CASE : Any = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=_A , set_alpha_to_one=_A , )
torch.manual_seed(0)
_SCREAMING_SNAKE_CASE : Dict = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0)
_SCREAMING_SNAKE_CASE : List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , projection_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_0_0_2 , )
_SCREAMING_SNAKE_CASE : Optional[Any] = CLIPTextModel(_A)
_SCREAMING_SNAKE_CASE : str = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""")
_SCREAMING_SNAKE_CASE : str = 7_7
_SCREAMING_SNAKE_CASE : Optional[int] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def _lowerCAmelCase ( self : List[str] , _A : Optional[int] , _A : Tuple=0):
"""simple docstring"""
if str(_A).startswith("""mps"""):
_SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(_A)
else:
_SCREAMING_SNAKE_CASE : str = torch.Generator(device=_A).manual_seed(_A)
_SCREAMING_SNAKE_CASE : Optional[Any] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def _lowerCAmelCase ( self : str):
"""simple docstring"""
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)
def _lowerCAmelCase ( self : Any):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
def _lowerCAmelCase ( self : Any):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Any = """cpu""" # ensure determinism for the device-dependent torch.Generator
_SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components()
torch.manual_seed(0)
_SCREAMING_SNAKE_CASE : List[str] = RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_0_0_2 , )
# TODO: remove after fixing the non-deterministic text encoder
_SCREAMING_SNAKE_CASE : Optional[int] = RobertaSeriesModelWithTransformation(_A)
_SCREAMING_SNAKE_CASE : str = text_encoder
_SCREAMING_SNAKE_CASE : Union[str, Any] = AltDiffusionPipeline(**_A)
_SCREAMING_SNAKE_CASE : Optional[Any] = alt_pipe.to(_A)
alt_pipe.set_progress_bar_config(disable=_A)
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(_A)
_SCREAMING_SNAKE_CASE : Tuple = """A photo of an astronaut"""
_SCREAMING_SNAKE_CASE : Optional[Any] = alt_pipe(**_A)
_SCREAMING_SNAKE_CASE : Tuple = output.images
_SCREAMING_SNAKE_CASE : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
_SCREAMING_SNAKE_CASE : str = np.array(
[0.5_748_162, 0.60_447_145, 0.48_821_217, 0.50_100_636, 0.5_431_185, 0.45_763_683, 0.49_657_696, 0.48_132_733, 0.47_573_093])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def _lowerCAmelCase ( self : int):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Union[str, Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
_SCREAMING_SNAKE_CASE : List[Any] = self.get_dummy_components()
_SCREAMING_SNAKE_CASE : Union[str, Any] = PNDMScheduler(skip_prk_steps=_A)
torch.manual_seed(0)
_SCREAMING_SNAKE_CASE : Tuple = RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_0_0_2 , )
# TODO: remove after fixing the non-deterministic text encoder
_SCREAMING_SNAKE_CASE : Optional[int] = RobertaSeriesModelWithTransformation(_A)
_SCREAMING_SNAKE_CASE : Optional[Any] = text_encoder
_SCREAMING_SNAKE_CASE : Union[str, Any] = AltDiffusionPipeline(**_A)
_SCREAMING_SNAKE_CASE : Dict = alt_pipe.to(_A)
alt_pipe.set_progress_bar_config(disable=_A)
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(_A)
_SCREAMING_SNAKE_CASE : Any = alt_pipe(**_A)
_SCREAMING_SNAKE_CASE : List[Any] = output.images
_SCREAMING_SNAKE_CASE : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
_SCREAMING_SNAKE_CASE : Tuple = np.array(
[0.51_605_093, 0.5_707_241, 0.47_365_507, 0.50_578_886, 0.5_633_877, 0.4_642_503, 0.5_182_081, 0.48_763_484, 0.49_084_237])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : Dict):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCAmelCase ( self : List[str]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : int = AltDiffusionPipeline.from_pretrained("""BAAI/AltDiffusion""" , safety_checker=_A)
_SCREAMING_SNAKE_CASE : Tuple = alt_pipe.to(_A)
alt_pipe.set_progress_bar_config(disable=_A)
_SCREAMING_SNAKE_CASE : List[Any] = """A painting of a squirrel eating a burger"""
_SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(0)
_SCREAMING_SNAKE_CASE : Optional[Any] = alt_pipe([prompt] , generator=_A , guidance_scale=6.0 , num_inference_steps=2_0 , output_type="""np""")
_SCREAMING_SNAKE_CASE : Tuple = output.images
_SCREAMING_SNAKE_CASE : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_SCREAMING_SNAKE_CASE : Union[str, Any] = np.array([0.1_010, 0.0_800, 0.0_794, 0.0_885, 0.0_843, 0.0_762, 0.0_769, 0.0_729, 0.0_586])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def _lowerCAmelCase ( self : Dict):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Dict = DDIMScheduler.from_pretrained("""BAAI/AltDiffusion""" , subfolder="""scheduler""")
_SCREAMING_SNAKE_CASE : Union[str, Any] = AltDiffusionPipeline.from_pretrained("""BAAI/AltDiffusion""" , scheduler=_A , safety_checker=_A)
_SCREAMING_SNAKE_CASE : Optional[int] = alt_pipe.to(_A)
alt_pipe.set_progress_bar_config(disable=_A)
_SCREAMING_SNAKE_CASE : List[str] = """A painting of a squirrel eating a burger"""
_SCREAMING_SNAKE_CASE : Dict = torch.manual_seed(0)
_SCREAMING_SNAKE_CASE : Any = alt_pipe([prompt] , generator=_A , num_inference_steps=2 , output_type="""numpy""")
_SCREAMING_SNAKE_CASE : Union[str, Any] = output.images
_SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_SCREAMING_SNAKE_CASE : List[str] = np.array([0.4_019, 0.4_052, 0.3_810, 0.4_119, 0.3_916, 0.3_982, 0.4_651, 0.4_195, 0.5_323])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 338 |
"""simple docstring"""
from jiwer import compute_measures
import datasets
A_ = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
A_ = """\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
"""
A_ = """
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> wer = datasets.load_metric(\"wer\")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase ( datasets.Metric ):
def UpperCAmelCase__ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
] , )
def UpperCAmelCase__ ( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=False ):
if concatenate_texts:
return compute_measures(UpperCAmelCase , UpperCAmelCase )["wer"]
else:
lowerCamelCase_ = 0
lowerCamelCase_ = 0
for prediction, reference in zip(UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = compute_measures(UpperCAmelCase , UpperCAmelCase )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
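# --- Illustrative sketch (separate from the metric class above) ---
# A minimal word-level WER computed directly from the Levenshtein alignment, assuming
# whitespace tokenization; shown only to make the (S + D + I) / N formula concrete.
def _word_error_rate(prediction, reference):
    hyp, ref = prediction.split(), reference.split()
    # dp[i][j] = minimum number of substitutions/deletions/insertions turning
    # the first i reference words into the first j hypothesis words
    dp = [[0] * (len(hyp) + 1) for _ in range(len(ref) + 1)]
    for i in range(len(ref) + 1):
        dp[i][0] = i
    for j in range(len(hyp) + 1):
        dp[0][j] = j
    for i in range(1, len(ref) + 1):
        for j in range(1, len(hyp) + 1):
            cost = 0 if ref[i - 1] == hyp[j - 1] else 1
            dp[i][j] = min(dp[i - 1][j] + 1, dp[i][j - 1] + 1, dp[i - 1][j - 1] + cost)
    return dp[len(ref)][len(hyp)] / len(ref)
# e.g. _word_error_rate("this is the prediction", "this is the reference") == 0.25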
| 29 | 0 |
"""simple docstring"""
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class lowercase_ ( a_ ):
__magic_name__ : int = 0
__magic_name__ : bool = False
__magic_name__ : float = 3.0
class lowercase_ ( unittest.TestCase ):
def _lowerCAmelCase ( self : Tuple ):
# If no defaults are changed, `to_kwargs` returns an empty dict.
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {"a": 2} )
self.assertDictEqual(MockClass(a=2 , b=_lowercase ).to_kwargs() , {"a": 2, "b": True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {"a": 2, "c": 2.25} )
@require_cuda
def _lowerCAmelCase ( self : Union[str, Any] ):
# If no defaults are changed, `to_kwargs` returns an empty dict.
lowerCAmelCase__ : Union[str, Any] = GradScalerKwargs(init_scale=1_0_2_4 , growth_factor=2 )
AcceleratorState._reset_state()
lowerCAmelCase__ : List[Any] = Accelerator(mixed_precision="fp16" , kwargs_handlers=[scaler_handler] )
print(accelerator.use_fpaa )
lowerCAmelCase__ : Any = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1_0_2_4.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2_0_0_0 )
self.assertEqual(scaler._enabled , _lowercase )
@require_multi_gpu
def _lowerCAmelCase ( self : Any ):
lowerCAmelCase__ : Optional[Any] = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
execute_subprocess_async(_lowercase , env=os.environ.copy() )
if __name__ == "__main__":
__UpperCAmelCase = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
__UpperCAmelCase = Accelerator(kwargs_handlers=[ddp_scaler])
__UpperCAmelCase = torch.nn.Linear(100, 200)
__UpperCAmelCase = accelerator.prepare(model)
# Check the values changed in kwargs
__UpperCAmelCase = ""
__UpperCAmelCase = model.bucket_bytes_cap // (1_024 * 1_024)
if observed_bucket_cap_map != 15:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 308 |
"""simple docstring"""
def lowercase ( lowerCAmelCase__ ):
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 29 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Optional[Any] = {
"xlm-mlm-en-2048": "https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json",
"xlm-mlm-ende-1024": "https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json",
"xlm-mlm-enfr-1024": "https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json",
"xlm-mlm-enro-1024": "https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json",
"xlm-mlm-tlm-xnli15-1024": "https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json",
"xlm-mlm-xnli15-1024": "https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json",
"xlm-clm-enfr-1024": "https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json",
"xlm-clm-ende-1024": "https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json",
"xlm-mlm-17-1280": "https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json",
"xlm-mlm-100-1280": "https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json",
}
class _lowerCamelCase( _a ):
lowercase_ : Dict = 'xlm'
lowercase_ : str = {
'hidden_size': 'emb_dim',
'num_attention_heads': 'n_heads',
'num_hidden_layers': 'n_layers',
'n_words': 'vocab_size', # For backward compatibility
}
def __init__( self, lowerCamelCase=3_01_45, lowerCamelCase=20_48, lowerCamelCase=12, lowerCamelCase=16, lowerCamelCase=0.1, lowerCamelCase=0.1, lowerCamelCase=True, lowerCamelCase=False, lowerCamelCase=False, lowerCamelCase=False, lowerCamelCase=1, lowerCamelCase=True, lowerCamelCase=5_12, lowerCamelCase=20_48**-0.5, lowerCamelCase=1E-12, lowerCamelCase=0.0_2, lowerCamelCase=0, lowerCamelCase=1, lowerCamelCase=2, lowerCamelCase=3, lowerCamelCase=5, lowerCamelCase=True, lowerCamelCase="first", lowerCamelCase=True, lowerCamelCase=None, lowerCamelCase=True, lowerCamelCase=0.1, lowerCamelCase=5, lowerCamelCase=5, lowerCamelCase=0, lowerCamelCase=0, lowerCamelCase=2, lowerCamelCase=0, **lowerCamelCase, ) -> List[str]:
"""simple docstring"""
_lowercase : Any = vocab_size
_lowercase : Optional[int] = emb_dim
_lowercase : Tuple = n_layers
_lowercase : int = n_heads
_lowercase : Tuple = dropout
_lowercase : str = attention_dropout
_lowercase : Optional[int] = gelu_activation
_lowercase : int = sinusoidal_embeddings
_lowercase : Union[str, Any] = causal
_lowercase : List[Any] = asm
_lowercase : List[str] = n_langs
_lowercase : Union[str, Any] = use_lang_emb
_lowercase : Optional[Any] = layer_norm_eps
_lowercase : Union[str, Any] = bos_index
_lowercase : List[str] = eos_index
_lowercase : Any = pad_index
_lowercase : Union[str, Any] = unk_index
_lowercase : List[Any] = mask_index
_lowercase : Optional[int] = is_encoder
_lowercase : Optional[Any] = max_position_embeddings
_lowercase : Optional[Any] = embed_init_std
_lowercase : List[str] = init_std
_lowercase : Tuple = summary_type
_lowercase : str = summary_use_proj
_lowercase : Any = summary_activation
_lowercase : List[str] = summary_proj_to_labels
_lowercase : List[str] = summary_first_dropout
_lowercase : Any = start_n_top
_lowercase : Optional[Any] = end_n_top
_lowercase : List[Any] = mask_token_id
_lowercase : List[Any] = lang_id
if "n_words" in kwargs:
_lowercase : int = kwargs['n_words']
super().__init__(pad_token_id=lowerCamelCase, bos_token_id=lowerCamelCase, **lowerCamelCase)
class _lowerCamelCase( _a ):
@property
def UpperCamelCase ( self) -> str:
"""simple docstring"""
if self.task == "multiple-choice":
_lowercase : List[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_lowercase : Dict = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
])
| 89 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class __lowerCamelCase ( unittest.TestCase ):
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = jnp.ones((batch_size, length) ) / length
return scores
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = None
lowerCamelCase_ = 20
lowerCamelCase_ = self._get_uniform_logits(batch_size=2 , length=UpperCAmelCase )
# tweak scores to not be uniform anymore
lowerCamelCase_ = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
lowerCamelCase_ = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
lowerCamelCase_ = jax.nn.softmax(UpperCAmelCase , axis=-1 )
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=1.3 )
lowerCamelCase_ = jax.nn.softmax(temp_dist_warper_sharper(UpperCAmelCase , scores.copy() , cur_len=UpperCAmelCase ) , axis=-1 )
lowerCamelCase_ = jax.nn.softmax(temp_dist_warper_smoother(UpperCAmelCase , scores.copy() , cur_len=UpperCAmelCase ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = None
lowerCamelCase_ = 10
lowerCamelCase_ = 2
# create ramp distribution
lowerCamelCase_ = np.broadcast_to(np.arange(UpperCAmelCase )[None, :] , (batch_size, vocab_size) ).copy()
lowerCamelCase_ = ramp_logits[1:, : vocab_size // 2] + vocab_size
lowerCamelCase_ = FlaxTopKLogitsWarper(3 )
lowerCamelCase_ = top_k_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
lowerCamelCase_ = 5
lowerCamelCase_ = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
lowerCamelCase_ = np.broadcast_to(np.arange(UpperCAmelCase )[None, :] , (batch_size, length) ).copy()
lowerCamelCase_ = top_k_warp_safety_check(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = None
lowerCamelCase_ = 10
lowerCamelCase_ = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
lowerCamelCase_ = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.1_5, 0.3, 0.3, 0.2_5]] ) )
lowerCamelCase_ = FlaxTopPLogitsWarper(0.8 )
lowerCamelCase_ = np.exp(top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
lowerCamelCase_ = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.2_5]] )
self.assertTrue(np.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
# check edge cases with negative and extreme logits
lowerCamelCase_ = np.broadcast_to(np.arange(UpperCAmelCase )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
lowerCamelCase_ = ramp_logits[1] * 1_0_0.0
# make sure at least 2 tokens are kept
lowerCamelCase_ = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
lowerCamelCase_ = top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 20
lowerCamelCase_ = 4
lowerCamelCase_ = 0
lowerCamelCase_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCAmelCase )
# check that min length is applied at length 5
lowerCamelCase_ = ids_tensor((batch_size, 20) , vocab_size=20 )
lowerCamelCase_ = 5
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = min_dist_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] )
# check that min length is not applied anymore at length 15
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = 15
lowerCamelCase_ = min_dist_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertFalse(jnp.isinf(UpperCAmelCase ).any() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 20
lowerCamelCase_ = 4
lowerCamelCase_ = 0
lowerCamelCase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase )
# check that all scores are -inf except the bos_token_id score
lowerCamelCase_ = ids_tensor((batch_size, 1) , vocab_size=20 )
lowerCamelCase_ = 1
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
lowerCamelCase_ = 3
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertFalse(jnp.isinf(UpperCAmelCase ).any() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 20
lowerCamelCase_ = 4
lowerCamelCase_ = 0
lowerCamelCase_ = 5
lowerCamelCase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase , eos_token_id=UpperCAmelCase )
# check that all scores are -inf except the eos_token_id when max_length is reached
lowerCamelCase_ = ids_tensor((batch_size, 4) , vocab_size=20 )
lowerCamelCase_ = 4
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
lowerCamelCase_ = 3
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertFalse(jnp.isinf(UpperCAmelCase ).any() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 4
lowerCamelCase_ = 10
lowerCamelCase_ = 15
lowerCamelCase_ = 2
lowerCamelCase_ = 1
lowerCamelCase_ = 15
# dummy input_ids and scores
lowerCamelCase_ = ids_tensor((batch_size, sequence_length) , UpperCAmelCase )
lowerCamelCase_ = input_ids.copy()
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = scores.copy()
# instantiate all dist processors
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase_ = FlaxTopKLogitsWarper(3 )
lowerCamelCase_ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCamelCase_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCAmelCase )
lowerCamelCase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase )
lowerCamelCase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase , eos_token_id=UpperCAmelCase )
lowerCamelCase_ = 10
# no processor list
lowerCamelCase_ = temp_dist_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = top_k_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = min_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = bos_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = eos_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# with processor list
lowerCamelCase_ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCamelCase_ = processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 4
lowerCamelCase_ = 10
lowerCamelCase_ = 15
lowerCamelCase_ = 2
lowerCamelCase_ = 1
lowerCamelCase_ = 15
# dummy input_ids and scores
lowerCamelCase_ = ids_tensor((batch_size, sequence_length) , UpperCAmelCase )
lowerCamelCase_ = input_ids.copy()
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = scores.copy()
# instantiate all dist processors
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase_ = FlaxTopKLogitsWarper(3 )
lowerCamelCase_ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCamelCase_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCAmelCase )
lowerCamelCase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase )
lowerCamelCase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase , eos_token_id=UpperCAmelCase )
lowerCamelCase_ = 10
# no processor list
def run_no_processor_list(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = temp_dist_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = top_k_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = min_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = bos_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = eos_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
return scores
# with processor list
def run_processor_list(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCamelCase_ = processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
return scores
lowerCamelCase_ = jax.jit(UpperCAmelCase )
lowerCamelCase_ = jax.jit(UpperCAmelCase )
lowerCamelCase_ = jitted_run_no_processor_list(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = jitted_run_processor_list(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
| 29 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__lowerCAmelCase : List[str] = {
"configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Optional[Any] = ["VivitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : List[Any] = [
"VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"VivitModel",
"VivitPreTrainedModel",
"VivitForVideoClassification",
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 644 |
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def lowercase ( lowerCAmelCase__ ):
def wrapper(*lowerCAmelCase__ ,**lowerCAmelCase__ ):
lowerCamelCase_ = timeit.default_timer()
lowerCamelCase_ = func(*lowerCAmelCase__ ,**lowerCAmelCase__ )
lowerCamelCase_ = timeit.default_timer() - starttime
return delta
lowerCamelCase_ = func.__name__
return wrapper
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=100 ,lowerCAmelCase__=None ):
lowerCamelCase_ = []
lowerCamelCase_ = seq_shapes or {}
for i in range(lowerCAmelCase__ ):
lowerCamelCase_ = {}
for col_id, (k, v) in enumerate(features.items() ):
if isinstance(lowerCAmelCase__ ,_ArrayXD ):
lowerCamelCase_ = np.random.rand(*v.shape ).astype(v.dtype )
elif isinstance(lowerCAmelCase__ ,datasets.Value ):
if v.dtype == "string":
lowerCamelCase_ = '''The small grey turtle was surprisingly fast when challenged.'''
else:
lowerCamelCase_ = np.random.randint(10 ,size=1 ).astype(v.dtype ).item()
elif isinstance(lowerCAmelCase__ ,datasets.Sequence ):
while isinstance(lowerCAmelCase__ ,datasets.Sequence ):
lowerCamelCase_ = v.feature
lowerCamelCase_ = seq_shapes[k]
lowerCamelCase_ = np.random.rand(*lowerCAmelCase__ ).astype(v.dtype )
lowerCamelCase_ = data
dummy_data.append((i, example) )
return dummy_data
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=100 ,lowerCAmelCase__=None ):
lowerCamelCase_ = generate_examples(lowerCAmelCase__ ,num_examples=lowerCAmelCase__ ,seq_shapes=lowerCAmelCase__ )
with ArrowWriter(features=lowerCAmelCase__ ,path=lowerCAmelCase__ ) as writer:
for key, record in dummy_data:
lowerCamelCase_ = features.encode_example(lowerCAmelCase__ )
writer.write(lowerCAmelCase__ )
lowerCamelCase_ , lowerCamelCase_ = writer.finalize()
if not num_final_examples == num_examples:
raise ValueError(
f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}." )
lowerCamelCase_ = datasets.Dataset.from_file(filename=lowerCAmelCase__ ,info=datasets.DatasetInfo(features=lowerCAmelCase__ ) )
return dataset
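# Illustrative call (the upstream name `generate_example_dataset`, the feature spec and the
# path below are assumptions, not taken from this snippet):
#   features = datasets.Features({"text": datasets.Value("string"),
#                                  "matrix": datasets.Array2D(shape=(4, 4), dtype="float32")})
#   dataset = generate_example_dataset("/tmp/bench.arrow", features, num_examples=100)
# i.e. the writer above materialises random rows matching `features` into an Arrow file and
# reloads them as a datasets.Dataset for benchmarking.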
| 29 | 0 |
"""simple docstring"""
import copy
import re
class __a :
"""simple docstring"""
__UpperCamelCase : Optional[Any] = 'hp'
__UpperCamelCase : str = {}
__UpperCamelCase : Dict = None
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls , snake_case , snake_case ):
"""simple docstring"""
lowerCAmelCase__ : Dict = prefix
lowerCAmelCase__ : Dict = defaults
cls.build_naming_info()
@staticmethod
def SCREAMING_SNAKE_CASE_ ( snake_case , snake_case ):
"""simple docstring"""
if len(snake_case ) == 0:
return ""
lowerCAmelCase__ : str = None
if any(char.isdigit() for char in word ):
raise Exception(F"""Parameters should not contain numbers: '{word}' contains a number""" )
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1 , len(snake_case ) + 1 ):
lowerCAmelCase__ : List[Any] = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
lowerCAmelCase__ : Tuple = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(snake_case ):
lowerCAmelCase__ : List[Any] = ""
while integer != 0:
lowerCAmelCase__ : Any = chr(ord("A" ) + integer % 10 ) + s
integer //= 10
return s
lowerCAmelCase__ : Union[str, Any] = 0
while True:
lowerCAmelCase__ : str = word + "#" + int_to_alphabetic(snake_case )
if sword in info["reverse_short_word"]:
continue
else:
lowerCAmelCase__ : int = sword
break
lowerCAmelCase__ : Optional[int] = short_word
lowerCAmelCase__ : Any = word
return short_word
@staticmethod
def SCREAMING_SNAKE_CASE_ ( snake_case , snake_case ):
"""simple docstring"""
lowerCAmelCase__ : Dict = param_name.split("_" )
lowerCAmelCase__ : List[str] = [TrialShortNamer.shortname_for_word(snake_case , snake_case ) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fallback
# to a separated short name
lowerCAmelCase__ : Optional[Any] = ["", "_"]
for separator in separators:
lowerCAmelCase__ : Union[str, Any] = separator.join(snake_case )
if shortname not in info["reverse_short_param"]:
lowerCAmelCase__ : Tuple = shortname
lowerCAmelCase__ : List[str] = param_name
return shortname
return param_name
@staticmethod
def SCREAMING_SNAKE_CASE_ ( snake_case , snake_case ):
"""simple docstring"""
lowerCAmelCase__ : List[str] = TrialShortNamer.shortname_for_key(snake_case , snake_case )
lowerCAmelCase__ : str = short_name
lowerCAmelCase__ : Union[str, Any] = param_name
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls ):
"""simple docstring"""
if cls.NAMING_INFO is not None:
return
lowerCAmelCase__ : Dict = {
"short_word": {},
"reverse_short_word": {},
"short_param": {},
"reverse_short_param": {},
}
lowerCAmelCase__ : int = list(cls.DEFAULTS.keys() )
for k in field_keys:
cls.add_new_param_name(snake_case , snake_case )
lowerCAmelCase__ : List[Any] = info
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls , snake_case ):
"""simple docstring"""
cls.build_naming_info()
assert cls.PREFIX is not None
lowerCAmelCase__ : List[str] = [copy.copy(cls.PREFIX )]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(F"""You should provide a default value for the param name {k} with value {v}""" )
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
lowerCAmelCase__ : Tuple = cls.NAMING_INFO["short_param"][k]
if isinstance(snake_case , snake_case ):
lowerCAmelCase__ : int = 1 if v else 0
lowerCAmelCase__ : List[str] = "" if isinstance(snake_case , (int, float) ) else "-"
lowerCAmelCase__ : Optional[Any] = F"""{key}{sep}{v}"""
name.append(snake_case )
return "_".join(snake_case )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls , snake_case ):
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = repr[len(cls.PREFIX ) + 1 :]
if repr == "":
lowerCAmelCase__ : List[Any] = []
else:
lowerCAmelCase__ : Optional[int] = repr.split("_" )
lowerCAmelCase__ : str = {}
for value in values:
if "-" in value:
lowerCAmelCase__ , lowerCAmelCase__ : Any = value.split("-" )
else:
lowerCAmelCase__ : Optional[int] = re.sub("[0-9.]" , "" , snake_case )
lowerCAmelCase__ : Optional[Any] = float(re.sub("[^0-9.]" , "" , snake_case ) )
lowerCAmelCase__ : str = cls.NAMING_INFO["reverse_short_param"][p_k]
lowerCAmelCase__ : Dict = p_v
for k in cls.DEFAULTS:
if k not in parameters:
lowerCAmelCase__ : List[str] = cls.DEFAULTS[k]
return parameters
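    # Illustrative usage, assuming the public method names of the upstream TrialShortNamer
    # (`shortname` / `parse_repr`) that this class mirrors:
    #   class RunNamer(TrialShortNamer):
    #       PREFIX = "run"
    #       DEFAULTS = {"learning_rate": 1e-3, "batch_size": 32}
    #   RunNamer.shortname({"learning_rate": 1e-4, "batch_size": 32})  # -> "run_lr0.0001"
    #   RunNamer.parse_repr("run_lr0.0001")  # -> {"learning_rate": 0.0001, "batch_size": 32}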
| 453 |
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
A_ = [
"""good first issue""",
"""feature request""",
"""wip""",
]
def lowercase ( ):
lowerCamelCase_ = Github(os.environ['''GITHUB_TOKEN'''] )
lowerCamelCase_ = g.get_repo('''huggingface/accelerate''' )
lowerCamelCase_ = repo.get_issues(state='''open''' )
for issue in open_issues:
lowerCamelCase_ = sorted([comment for comment in issue.get_comments()] ,key=lambda lowerCAmelCase__ : i.created_at ,reverse=lowerCAmelCase__ )
lowerCamelCase_ = comments[0] if len(lowerCAmelCase__ ) > 0 else None
lowerCamelCase_ = dt.utcnow()
lowerCamelCase_ = (current_time - issue.updated_at).days
lowerCamelCase_ = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state='''closed''' )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main()
| 29 | 0 |
from ..utils import DummyObject, requires_backends
class _A ( metaclass=__lowercase ):
__a = ['torch', 'scipy']
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
requires_backends(self , ["""torch""", """scipy"""] )
@classmethod
def UpperCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
requires_backends(cls , ["""torch""", """scipy"""] )
@classmethod
def UpperCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
requires_backends(cls , ["""torch""", """scipy"""] ) | 518 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''_float_tensor''',
'''decoder.output_projection.weight''',
]
for k in ignore_keys:
state_dict.pop(lowerCAmelCase__ ,lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ , lowerCamelCase_ = emb.weight.shape
lowerCamelCase_ = nn.Linear(lowerCAmelCase__ ,lowerCAmelCase__ ,bias=lowerCAmelCase__ )
lowerCamelCase_ = emb.weight.data
return lin_layer
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__="facebook/mbart-large-en-ro" ,lowerCAmelCase__=False ,lowerCAmelCase__=False ):
lowerCamelCase_ = torch.load(lowerCAmelCase__ ,map_location='''cpu''' )['''model''']
remove_ignore_keys_(lowerCAmelCase__ )
lowerCamelCase_ = state_dict['''encoder.embed_tokens.weight'''].shape[0]
lowerCamelCase_ = MBartConfig.from_pretrained(lowerCAmelCase__ ,vocab_size=lowerCAmelCase__ )
if mbart_aa and finetuned:
lowerCamelCase_ = '''relu'''
lowerCamelCase_ = state_dict['''decoder.embed_tokens.weight''']
lowerCamelCase_ = MBartForConditionalGeneration(lowerCAmelCase__ )
model.model.load_state_dict(lowerCAmelCase__ )
if finetuned:
lowerCamelCase_ = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""fairseq_path""", type=str, help="""bart.large, bart.large.cnn or a path to a model.pt on local filesystem."""
)
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--hf_config""",
default="""facebook/mbart-large-cc25""",
type=str,
help="""Which huggingface architecture to use: mbart-large""",
)
parser.add_argument("""--mbart_50""", action="""store_true""", help="""whether the model is mMART-50 checkpoint""")
parser.add_argument("""--finetuned""", action="""store_true""", help="""whether the model is a fine-tuned checkpoint""")
A_ = parser.parse_args()
A_ = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
| 29 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : str = {
'andreasmadsen/efficient_mlm_m0.40': (
'https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json'
),
}
class A ( lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase : Any = 'roberta-prelayernorm'
def __init__( self : List[Any] , _UpperCamelCase : int=50_265 , _UpperCamelCase : List[Any]=768 , _UpperCamelCase : Optional[Any]=12 , _UpperCamelCase : Union[str, Any]=12 , _UpperCamelCase : Optional[Any]=3_072 , _UpperCamelCase : Optional[int]="gelu" , _UpperCamelCase : List[Any]=0.1 , _UpperCamelCase : Tuple=0.1 , _UpperCamelCase : Any=512 , _UpperCamelCase : Tuple=2 , _UpperCamelCase : int=0.0_2 , _UpperCamelCase : Any=1e-12 , _UpperCamelCase : int=1 , _UpperCamelCase : Any=0 , _UpperCamelCase : Any=2 , _UpperCamelCase : Optional[Any]="absolute" , _UpperCamelCase : str=True , _UpperCamelCase : List[Any]=None , **_UpperCamelCase : List[str] , ):
super().__init__(pad_token_id=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase)
_lowercase: Any = vocab_size
_lowercase: Any = hidden_size
_lowercase: Dict = num_hidden_layers
_lowercase: Any = num_attention_heads
_lowercase: List[str] = hidden_act
_lowercase: str = intermediate_size
_lowercase: str = hidden_dropout_prob
_lowercase: str = attention_probs_dropout_prob
_lowercase: Optional[Any] = max_position_embeddings
_lowercase: Any = type_vocab_size
_lowercase: Dict = initializer_range
_lowercase: List[Any] = layer_norm_eps
_lowercase: List[str] = position_embedding_type
_lowercase: Any = use_cache
_lowercase: Dict = classifier_dropout
class A ( lowerCamelCase_ ):
'''simple docstring'''
@property
def UpperCAmelCase__ ( self : Optional[Any]):
if self.task == "multiple-choice":
_lowercase: Optional[Any] = {0: "batch", 1: "choice", 2: "sequence"}
else:
_lowercase: Any = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
])
| 226 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A_ = {
"""configuration_nllb_moe""": [
"""NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""NllbMoeConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"""NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NllbMoeForConditionalGeneration""",
"""NllbMoeModel""",
"""NllbMoePreTrainedModel""",
"""NllbMoeTop2Router""",
"""NllbMoeSparseMLP""",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
NllbMoeTopaRouter,
)
else:
import sys
A_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 29 | 0 |
from collections.abc import Iterable
from typing import Generic, TypeVar
_T = TypeVar("_T")


class lowerCAmelCase_(Generic[_T]):
    def __init__(self, iterable: Iterable[_T] | None = None) -> None:
        # Elements are enqueued onto the first stack and dequeued from the second.
        self._stack1: list[_T] = list(iterable or [])
        self._stack2: list[_T] = []

    def __len__(self) -> int:
        return len(self._stack1) + len(self._stack2)

    def __repr__(self) -> str:
        return f"Queue({tuple(self._stack2[::-1] + self._stack1)})"

    def put(self, item: _T) -> None:
        self._stack1.append(item)

    def get(self) -> _T:
        # Refill the output stack only when it is empty, so each element is moved at most once.
        stack1_pop = self._stack1.pop
        stack2_append = self._stack2.append
        if not self._stack2:
            while self._stack1:
                stack2_append(stack1_pop())
        if not self._stack2:
            raise IndexError("Queue is empty")
        return self._stack2.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
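# Illustrative check of the queue above: elements come out in FIFO order even though the
# backing storage is two LIFO stacks, and get() is amortized O(1) because every element is
# moved between the stacks at most once.
if __name__ == "__main__":
    demo_queue = lowerCAmelCase_([1, 2, 3])
    demo_queue.put(4)
    assert demo_queue.get() == 1
    assert len(demo_queue) == 3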
| 668 |
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class __lowerCamelCase ( lowerCAmelCase ):
a__: Any = (DDPMScheduler,)
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
        config = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**UpperCAmelCase )
return config
def UpperCAmelCase__ ( self ):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=UpperCAmelCase , beta_end=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
self.check_over_configs(thresholding=UpperCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=UpperCAmelCase , prediction_type=UpperCAmelCase , sample_max_value=UpperCAmelCase , )
def UpperCAmelCase__ ( self ):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for t in [0, 500, 999]:
self.check_over_forward(time_step=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1e-5
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = len(UpperCAmelCase )
lowerCamelCase_ = self.dummy_model()
lowerCamelCase_ = self.dummy_sample_deter
lowerCamelCase_ = torch.manual_seed(0 )
for t in reversed(range(UpperCAmelCase ) ):
# 1. predict noise residual
lowerCamelCase_ = model(UpperCAmelCase , UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
lowerCamelCase_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , generator=UpperCAmelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCamelCase_ = pred_prev_sample
lowerCamelCase_ = torch.sum(torch.abs(UpperCAmelCase ) )
lowerCamelCase_ = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config(prediction_type='''v_prediction''' )
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = len(UpperCAmelCase )
lowerCamelCase_ = self.dummy_model()
lowerCamelCase_ = self.dummy_sample_deter
lowerCamelCase_ = torch.manual_seed(0 )
for t in reversed(range(UpperCAmelCase ) ):
# 1. predict noise residual
lowerCamelCase_ = model(UpperCAmelCase , UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
lowerCamelCase_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , generator=UpperCAmelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCamelCase_ = pred_prev_sample
lowerCamelCase_ = torch.sum(torch.abs(UpperCAmelCase ) )
lowerCamelCase_ = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=UpperCAmelCase )
lowerCamelCase_ = scheduler.timesteps
for i, timestep in enumerate(UpperCAmelCase ):
if i == len(UpperCAmelCase ) - 1:
lowerCamelCase_ = -1
else:
lowerCamelCase_ = timesteps[i + 1]
lowerCamelCase_ = scheduler.previous_timestep(UpperCAmelCase )
lowerCamelCase_ = prev_t.item()
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = [100, 87, 50, 51, 0]
with self.assertRaises(UpperCAmelCase , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = [100, 87, 50, 1, 0]
lowerCamelCase_ = len(UpperCAmelCase )
with self.assertRaises(UpperCAmelCase , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=UpperCAmelCase , timesteps=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            UpperCAmelCase , msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}" , ):
scheduler.set_timesteps(timesteps=UpperCAmelCase )
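# Illustrative sketch of the sampling loop the tests above exercise (`unet` is a hypothetical
# noise-prediction model, not part of this file): a scheduler is built from the config dict,
# timesteps are walked in reverse, and each prediction is fed back through scheduler.step().
#
#   scheduler = DDPMScheduler(num_train_timesteps=1000, beta_schedule="linear")
#   sample = torch.randn(1, 3, 32, 32)
#   for t in reversed(range(scheduler.config.num_train_timesteps)):
#       residual = unet(sample, t)
#       sample = scheduler.step(residual, t, sample).prev_sample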
| 29 | 0 |
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def __magic_name__(module) -> None:
    """Freeze a module: disable gradients for all of its parameters."""
    for param in module.parameters():
        param.requires_grad = False
def __magic_name__ ( ) -> str:
'''simple docstring'''
snake_case_ = '''cuda''' if torch.cuda.is_available() else '''cpu'''
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
snake_case_ = '''mps'''
if device == "mps":
print(
'''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
''' with generations.''' )
return device
def __magic_name__ ( __UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
    fig = plt.imshow(__UpperCAmelCase)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
plt.show()
def __magic_name__ ( ) -> Optional[int]:
'''simple docstring'''
snake_case_ = datetime.now()
snake_case_ = current_time.strftime('''%H:%M:%S''' )
return timestamp
| 640 |
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
A_ = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class __lowerCamelCase ( lowerCAmelCase ):
a__: bool = field(default=lowerCAmelCase , metadata={'help': 'Whether to use SortishSampler or not.'} )
a__: bool = field(
default=lowerCAmelCase , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
a__: Optional[int] = field(
default=lowerCAmelCase , metadata={
'help': (
'The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '
'to the `max_length` value of the model configuration.'
)
} , )
a__: Optional[int] = field(
default=lowerCAmelCase , metadata={
'help': (
'The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '
'to the `num_beams` value of the model configuration.'
)
} , )
a__: Optional[Union[str, Path, GenerationConfig]] = field(
default=lowerCAmelCase , metadata={
'help': 'Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'
} , )
def UpperCAmelCase__ ( self ):
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
| 29 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowerCamelCase_ = logging.get_logger(__name__)
class _UpperCAmelCase ( BaseImageProcessor ):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize=True, size_divisor=32, resample=PILImageResampling.BILINEAR, do_rescale=True, **kwargs):
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(self, image, size_divisor, resample, data_format=None, **kwargs):
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(self, images, do_resize=None, do_rescale=None, size_divisor=None, resample=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError("Invalid image(s)")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
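# Illustrative sketch: running the processor above on a dummy image. With size_divisor=32 the
# spatial dims are rounded down to multiples of 32 (70x100 -> 64x96); the class and key names
# come from the code above, the demo values are arbitrary.
if __name__ == "__main__":
    demo_processor = _UpperCAmelCase(do_resize=True, size_divisor=32, do_rescale=True)
    demo_batch = demo_processor(images=np.zeros((70, 100, 3), dtype=np.uint8), return_tensors="np")
    print(demo_batch["pixel_values"].shape)  # expected (1, 3, 64, 96)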
| 330 |
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
A_ = True
except ImportError:
A_ = False
try:
from torch.hub import _get_torch_home
A_ = _get_torch_home()
except ImportError:
A_ = os.path.expanduser(
os.getenv("""TORCH_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """torch"""))
)
A_ = os.path.join(torch_cache_home, """transformers""")
A_ = """https://cdn.huggingface.co"""
A_ = """https://s3.amazonaws.com/models.huggingface.co/bert"""
A_ = """/""".join(str(Path(__file__).resolve()).split("""/""")[:-1])
A_ = os.path.join(PATH, """config.yaml""")
A_ = os.path.join(PATH, """attributes.txt""")
A_ = os.path.join(PATH, """objects.txt""")
A_ = os.getenv("""PYTORCH_PRETRAINED_BERT_CACHE""", default_cache_path)
A_ = os.getenv("""PYTORCH_TRANSFORMERS_CACHE""", PYTORCH_PRETRAINED_BERT_CACHE)
A_ = os.getenv("""TRANSFORMERS_CACHE""", PYTORCH_TRANSFORMERS_CACHE)
A_ = """pytorch_model.bin"""
A_ = """config.yaml"""
def lowercase ( lowerCAmelCase__=OBJECTS ,lowerCAmelCase__=ATTRIBUTES ):
lowerCamelCase_ = []
with open(lowerCAmelCase__ ) as f:
for object in f.readlines():
vg_classes.append(object.split(''',''' )[0].lower().strip() )
lowerCamelCase_ = []
with open(lowerCAmelCase__ ) as f:
for object in f.readlines():
vg_attrs.append(object.split(''',''' )[0].lower().strip() )
return vg_classes, vg_attrs
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = OrderedDict()
with open(lowerCAmelCase__ ,'''rb''' ) as f:
lowerCamelCase_ = pkl.load(lowerCAmelCase__ )['''model''']
for k in copy.deepcopy(list(ckp.keys() ) ):
lowerCamelCase_ = ckp.pop(lowerCAmelCase__ )
if isinstance(lowerCAmelCase__ ,np.ndarray ):
lowerCamelCase_ = torch.tensor(lowerCAmelCase__ )
else:
assert isinstance(lowerCAmelCase__ ,torch.tensor ), type(lowerCAmelCase__ )
lowerCamelCase_ = v
return r
class __lowerCamelCase :
a__: Union[str, Any] = {}
def __init__( self , UpperCAmelCase , UpperCAmelCase = "root" , UpperCAmelCase=0 ):
lowerCamelCase_ = name
lowerCamelCase_ = level
lowerCamelCase_ = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
lowerCamelCase_ = copy.deepcopy(UpperCAmelCase )
lowerCamelCase_ = copy.deepcopy(UpperCAmelCase )
if isinstance(UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = Config(UpperCAmelCase , name=UpperCAmelCase , level=level + 1 )
lowerCamelCase_ = v
setattr(self , UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = d
def __repr__( self ):
return str(list((self._pointer.keys()) ) )
def __setattr__( self , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = val
lowerCamelCase_ = val
lowerCamelCase_ = key.split('''.''' )
lowerCamelCase_ = len(UpperCAmelCase ) - 1
lowerCamelCase_ = self._pointer
if len(UpperCAmelCase ) > 1:
for i, l in enumerate(UpperCAmelCase ):
if hasattr(self , UpperCAmelCase ) and isinstance(getattr(self , UpperCAmelCase ) , UpperCAmelCase ):
setattr(getattr(self , UpperCAmelCase ) , '''.'''.join(levels[i:] ) , UpperCAmelCase )
if l == last_level:
lowerCamelCase_ = val
else:
lowerCamelCase_ = pointer[l]
def UpperCAmelCase__ ( self ):
return self._pointer
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase ):
with open(f"{file_name}" , '''w''' ) as stream:
dump(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase ):
with open(f"{file_name}" , '''w''' ) as stream:
json.dump(UpperCAmelCase , UpperCAmelCase )
@staticmethod
def UpperCAmelCase__ ( UpperCAmelCase ):
with open(UpperCAmelCase ) as stream:
lowerCamelCase_ = load(UpperCAmelCase , Loader=UpperCAmelCase )
return data
def __str__( self ):
lowerCamelCase_ = ''' '''
if self._name != "root":
lowerCamelCase_ = f"{t * (self._level-1)}{self._name}:\n"
else:
lowerCamelCase_ = ''''''
lowerCamelCase_ = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(UpperCAmelCase , UpperCAmelCase ):
r += f"{t * (self._level)}{v}\n"
self._level += 1
else:
r += f"{t * (self._level)}{k}: {v} ({type(UpperCAmelCase ).__name__})\n"
lowerCamelCase_ = level
return r[:-1]
@classmethod
def UpperCAmelCase__ ( cls , UpperCAmelCase , **UpperCAmelCase ):
lowerCamelCase_ , lowerCamelCase_ = cls.get_config_dict(UpperCAmelCase , **UpperCAmelCase )
return cls(UpperCAmelCase )
@classmethod
def UpperCAmelCase__ ( cls , UpperCAmelCase , **UpperCAmelCase ):
lowerCamelCase_ = kwargs.pop('''cache_dir''' , UpperCAmelCase )
lowerCamelCase_ = kwargs.pop('''force_download''' , UpperCAmelCase )
lowerCamelCase_ = kwargs.pop('''resume_download''' , UpperCAmelCase )
lowerCamelCase_ = kwargs.pop('''proxies''' , UpperCAmelCase )
lowerCamelCase_ = kwargs.pop('''local_files_only''' , UpperCAmelCase )
if os.path.isdir(UpperCAmelCase ):
lowerCamelCase_ = os.path.join(UpperCAmelCase , UpperCAmelCase )
elif os.path.isfile(UpperCAmelCase ) or is_remote_url(UpperCAmelCase ):
lowerCamelCase_ = pretrained_model_name_or_path
else:
lowerCamelCase_ = hf_bucket_url(UpperCAmelCase , filename=UpperCAmelCase , use_cdn=UpperCAmelCase )
try:
# Load from URL or cache if already cached
lowerCamelCase_ = cached_path(
UpperCAmelCase , cache_dir=UpperCAmelCase , force_download=UpperCAmelCase , proxies=UpperCAmelCase , resume_download=UpperCAmelCase , local_files_only=UpperCAmelCase , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
lowerCamelCase_ = Config.load_yaml(UpperCAmelCase )
except EnvironmentError:
lowerCamelCase_ = '''Can\'t load config for'''
raise EnvironmentError(UpperCAmelCase )
if resolved_config_file == config_file:
print('''loading configuration file from path''' )
else:
print('''loading configuration file cache''' )
return Config.load_yaml(UpperCAmelCase ), kwargs
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = torch.load('''dump.pt''' ,map_location=in_tensor.device )
lowerCamelCase_ = in_tensor.numpy()
lowerCamelCase_ = out_tensor.numpy()[0]
print(na.shape ,na[0, 0, :5] )
print(na.shape ,na[0, 0, :5] )
assert np.allclose(lowerCAmelCase__ ,lowerCAmelCase__ ,rtol=0.01 ,atol=0.1 ), (
f"{sum([1 for x in np.isclose(lowerCAmelCase__ ,lowerCAmelCase__ ,rtol=0.01 ,atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %"
" element-wise mismatch"
)
raise Exception('''tensors are all good''' )
# Hugging face functions below
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = urlparse(lowerCAmelCase__ )
return parsed.scheme in ("http", "https")
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=True ):
lowerCamelCase_ = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
lowerCamelCase_ = '''/''' not in model_id
if legacy_format:
return f"{endpoint}/{model_id}-{filename}"
else:
return f"{endpoint}/{model_id}/{filename}"
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=None ,lowerCAmelCase__=0 ,lowerCAmelCase__=None ,):
lowerCamelCase_ = '''python/{}'''.format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
ua += "; " + "; ".join('''{}/{}'''.format(lowerCAmelCase__ ,lowerCAmelCase__ ) for k, v in user_agent.items() )
elif isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
ua += "; " + user_agent
lowerCamelCase_ = {'''user-agent''': ua}
if resume_size > 0:
lowerCamelCase_ = '''bytes=%d-''' % (resume_size,)
lowerCamelCase_ = requests.get(lowerCAmelCase__ ,stream=lowerCAmelCase__ ,proxies=lowerCAmelCase__ ,headers=lowerCAmelCase__ )
if response.status_code == 416: # Range not satisfiable
return
lowerCamelCase_ = response.headers.get('''Content-Length''' )
lowerCamelCase_ = resume_size + int(lowerCAmelCase__ ) if content_length is not None else None
lowerCamelCase_ = tqdm(
unit='''B''' ,unit_scale=lowerCAmelCase__ ,total=lowerCAmelCase__ ,initial=lowerCAmelCase__ ,desc='''Downloading''' ,)
for chunk in response.iter_content(chunk_size=1_024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(lowerCAmelCase__ ) )
temp_file.write(lowerCAmelCase__ )
progress.close()
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,lowerCAmelCase__=None ,lowerCAmelCase__=10 ,lowerCAmelCase__=False ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,):
if cache_dir is None:
lowerCamelCase_ = TRANSFORMERS_CACHE
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = str(lowerCAmelCase__ )
os.makedirs(lowerCAmelCase__ ,exist_ok=lowerCAmelCase__ )
lowerCamelCase_ = None
if not local_files_only:
try:
lowerCamelCase_ = requests.head(lowerCAmelCase__ ,allow_redirects=lowerCAmelCase__ ,proxies=lowerCAmelCase__ ,timeout=lowerCAmelCase__ )
if response.status_code == 200:
lowerCamelCase_ = response.headers.get('''ETag''' )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
lowerCamelCase_ = url_to_filename(lowerCAmelCase__ ,lowerCAmelCase__ )
# get cache path to put the file
lowerCamelCase_ = os.path.join(lowerCAmelCase__ ,lowerCAmelCase__ )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(lowerCAmelCase__ ):
return cache_path
else:
lowerCamelCase_ = [
file
for file in fnmatch.filter(os.listdir(lowerCAmelCase__ ) ,filename + '''.*''' )
if not file.endswith('''.json''' ) and not file.endswith('''.lock''' )
]
if len(lowerCAmelCase__ ) > 0:
return os.path.join(lowerCAmelCase__ ,matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
'''Cannot find the requested files in the cached path and outgoing traffic has been'''
''' disabled. To enable model look-ups and downloads online, set \'local_files_only\''''
''' to False.''' )
return None
# From now on, etag is not None.
if os.path.exists(lowerCAmelCase__ ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
lowerCamelCase_ = cache_path + '''.lock'''
with FileLock(lowerCAmelCase__ ):
# If the download just completed while the lock was activated.
if os.path.exists(lowerCAmelCase__ ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
lowerCamelCase_ = cache_path + '''.incomplete'''
@contextmanager
def _resumable_file_manager():
with open(lowerCAmelCase__ ,'''a+b''' ) as f:
yield f
lowerCamelCase_ = _resumable_file_manager
if os.path.exists(lowerCAmelCase__ ):
lowerCamelCase_ = os.stat(lowerCAmelCase__ ).st_size
else:
lowerCamelCase_ = 0
else:
lowerCamelCase_ = partial(tempfile.NamedTemporaryFile ,dir=lowerCAmelCase__ ,delete=lowerCAmelCase__ )
lowerCamelCase_ = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
'''%s not found in cache or force_download set to True, downloading to %s''' ,lowerCAmelCase__ ,temp_file.name ,)
http_get(
lowerCAmelCase__ ,lowerCAmelCase__ ,proxies=lowerCAmelCase__ ,resume_size=lowerCAmelCase__ ,user_agent=lowerCAmelCase__ ,)
os.replace(temp_file.name ,lowerCAmelCase__ )
lowerCamelCase_ = {'''url''': url, '''etag''': etag}
lowerCamelCase_ = cache_path + '''.json'''
with open(lowerCAmelCase__ ,'''w''' ) as meta_file:
json.dump(lowerCAmelCase__ ,lowerCAmelCase__ )
return cache_path
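# Summary of the flow above: the URL is HEAD-requested for an ETag, url+etag are hashed into a
# cache filename, the download runs under a FileLock into a temporary file (optionally resumable),
# the result is atomically moved into place, and a sidecar .json with {"url", "etag"} is written.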
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=None ):
lowerCamelCase_ = url.encode('''utf-8''' )
lowerCamelCase_ = shaaaa(lowerCAmelCase__ )
lowerCamelCase_ = url_hash.hexdigest()
if etag:
lowerCamelCase_ = etag.encode('''utf-8''' )
lowerCamelCase_ = shaaaa(lowerCAmelCase__ )
filename += "." + etag_hash.hexdigest()
if url.endswith('''.h5''' ):
filename += ".h5"
return filename
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,lowerCAmelCase__=False ,lowerCAmelCase__=False ,):
if cache_dir is None:
lowerCamelCase_ = TRANSFORMERS_CACHE
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = str(lowerCAmelCase__ )
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = str(lowerCAmelCase__ )
if is_remote_url(lowerCAmelCase__ ):
# URL, so get it from the cache (downloading if necessary)
lowerCamelCase_ = get_from_cache(
lowerCAmelCase__ ,cache_dir=lowerCAmelCase__ ,force_download=lowerCAmelCase__ ,proxies=lowerCAmelCase__ ,resume_download=lowerCAmelCase__ ,user_agent=lowerCAmelCase__ ,local_files_only=lowerCAmelCase__ ,)
elif os.path.exists(lowerCAmelCase__ ):
# File, and it exists.
lowerCamelCase_ = url_or_filename
elif urlparse(lowerCAmelCase__ ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError('''file {} not found'''.format(lowerCAmelCase__ ) )
else:
# Something unknown
raise ValueError('''unable to parse {} as a URL or as a local path'''.format(lowerCAmelCase__ ) )
if extract_compressed_file:
if not is_zipfile(lowerCAmelCase__ ) and not tarfile.is_tarfile(lowerCAmelCase__ ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
lowerCamelCase_ , lowerCamelCase_ = os.path.split(lowerCAmelCase__ )
lowerCamelCase_ = output_file.replace('''.''' ,'''-''' ) + '''-extracted'''
lowerCamelCase_ = os.path.join(lowerCAmelCase__ ,lowerCAmelCase__ )
if os.path.isdir(lowerCAmelCase__ ) and os.listdir(lowerCAmelCase__ ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
lowerCamelCase_ = output_path + '''.lock'''
with FileLock(lowerCAmelCase__ ):
shutil.rmtree(lowerCAmelCase__ ,ignore_errors=lowerCAmelCase__ )
os.makedirs(lowerCAmelCase__ )
if is_zipfile(lowerCAmelCase__ ):
with ZipFile(lowerCAmelCase__ ,'''r''' ) as zip_file:
zip_file.extractall(lowerCAmelCase__ )
zip_file.close()
elif tarfile.is_tarfile(lowerCAmelCase__ ):
lowerCamelCase_ = tarfile.open(lowerCAmelCase__ )
tar_file.extractall(lowerCAmelCase__ )
tar_file.close()
else:
raise EnvironmentError('''Archive format of {} could not be identified'''.format(lowerCAmelCase__ ) )
return output_path_extracted
return output_path
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__="," ):
assert isinstance(lowerCAmelCase__ ,lowerCAmelCase__ )
if os.path.isfile(lowerCAmelCase__ ):
with open(lowerCAmelCase__ ) as f:
lowerCamelCase_ = eval(f.read() )
else:
lowerCamelCase_ = requests.get(lowerCAmelCase__ )
try:
lowerCamelCase_ = requests.json()
except Exception:
lowerCamelCase_ = req.content.decode()
assert data is not None, "could not connect"
try:
lowerCamelCase_ = eval(lowerCAmelCase__ )
except Exception:
lowerCamelCase_ = data.split('''\n''' )
req.close()
return data
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = requests.get(lowerCAmelCase__ )
lowerCamelCase_ = np.array(Image.open(BytesIO(response.content ) ) )
return img
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = url.split('''/''' )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(lowerCAmelCase__ )
with open(lowerCAmelCase__ ,'''rb''' ) as stream:
lowerCamelCase_ = pkl.load(lowerCAmelCase__ )
lowerCamelCase_ = weights.pop('''model''' )
lowerCamelCase_ = {}
for k, v in model.items():
lowerCamelCase_ = torch.from_numpy(lowerCAmelCase__ )
if "running_var" in k:
lowerCamelCase_ = torch.tensor([0] )
lowerCamelCase_ = k.replace('''running_var''' ,'''num_batches_tracked''' )
lowerCamelCase_ = zero
return new
def lowercase ( ):
print(f"{os.path.abspath(os.path.join(lowerCAmelCase__ ,os.pardir ) )}/demo.ipynb" )
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__="RGB" ):
assert isinstance(lowerCAmelCase__ ,lowerCAmelCase__ )
if os.path.isfile(lowerCAmelCase__ ):
lowerCamelCase_ = cva.imread(lowerCAmelCase__ )
else:
lowerCamelCase_ = get_image_from_url(lowerCAmelCase__ )
assert img is not None, f"could not connect to: {im}"
lowerCamelCase_ = cva.cvtColor(lowerCAmelCase__ ,cva.COLOR_BGR2RGB )
if input_format == "RGB":
lowerCamelCase_ = img[:, :, ::-1]
return img
def lowercase ( images ,batch=1 ):
    return (images[i : i + batch] for i in range(0 ,len(images ) ,batch ))
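# Illustrative: the batching helper above yields successive fixed-size slices of the input list,
# e.g. with batch=2 an input of [1, 2, 3, 4, 5] produces [1, 2], [3, 4], [5].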
| 29 | 0 |
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
_lowercase : Optional[int] = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class _UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , a__ , a__=7 , a__=3 , a__=18 , a__=30 , a__=400 , a__=None , a__=True , a__=True , a__=None , ) -> List[str]:
A = size if size is not None else {"""height""": 20, """width""": 20}
A = parent
A = batch_size
A = num_channels
A = image_size
A = min_resolution
A = max_resolution
A = size
A = do_normalize
A = do_convert_rgb
A = [512, 1024, 2048, 4096]
A = patch_size if patch_size is not None else {"""height""": 16, """width""": 16}
def _UpperCAmelCase ( self ) -> Union[str, Any]:
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def _UpperCAmelCase ( self ) -> List[str]:
A = """https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"""
A = Image.open(requests.get(a__ , stream=a__ ).raw ).convert("""RGB""" )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , )
@require_torch
@require_vision
class _UpperCamelCase ( __snake_case , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase = PixaStructImageProcessor if is_vision_available() else None
def _UpperCAmelCase ( self ) -> str:
A = PixaStructImageProcessingTester(self )
@property
def _UpperCAmelCase ( self ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def _UpperCAmelCase ( self ) -> List[str]:
A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a__ , """do_normalize""" ) )
self.assertTrue(hasattr(a__ , """do_convert_rgb""" ) )
def _UpperCAmelCase ( self ) -> List[str]:
A = self.image_processor_tester.prepare_dummy_image()
A = self.image_processing_class(**self.image_processor_dict )
A = 2048
A = image_processor(a__ , return_tensors="""pt""" , max_patches=a__ )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.06_06 ) , atol=1e-3 , rtol=1e-3 ) )
def _UpperCAmelCase ( self ) -> int:
# Initialize image_processor
A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , Image.Image )
# Test not batched input
A = (
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
A = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=a__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
A = image_processor(
a__ , return_tensors="""pt""" , max_patches=a__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def _UpperCAmelCase ( self ) -> str:
# Initialize image_processor
A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , Image.Image )
# Test not batched input
A = (
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* self.image_processor_tester.num_channels
) + 2
A = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(a__ ):
A = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=a__ ).flattened_patches
A = """Hello"""
A = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=a__ , header_text=a__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
A = image_processor(
a__ , return_tensors="""pt""" , max_patches=a__ , header_text=a__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
# Initialize image_processor
A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ , numpify=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , np.ndarray )
A = (
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
A = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=a__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
A = image_processor(
a__ , return_tensors="""pt""" , max_patches=a__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def _UpperCAmelCase ( self ) -> int:
# Initialize image_processor
A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ , torchify=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , torch.Tensor )
# Test not batched input
A = (
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
A = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=a__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
A = image_processor(
a__ , return_tensors="""pt""" , max_patches=a__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , )
@require_torch
@require_vision
class _UpperCamelCase ( __snake_case , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase = PixaStructImageProcessor if is_vision_available() else None
def _UpperCAmelCase ( self ) -> Optional[int]:
A = PixaStructImageProcessingTester(self , num_channels=4 )
A = 3
@property
def _UpperCAmelCase ( self ) -> Any:
return self.image_processor_tester.prepare_image_processor_dict()
def _UpperCAmelCase ( self ) -> Optional[int]:
A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a__ , """do_normalize""" ) )
self.assertTrue(hasattr(a__ , """do_convert_rgb""" ) )
def _UpperCAmelCase ( self ) -> Optional[Any]:
# Initialize image_processor
A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , Image.Image )
# Test not batched input
A = (
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
A = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=a__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
A = image_processor(
a__ , return_tensors="""pt""" , max_patches=a__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 641 |
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.17.0.dev0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
A_ = logging.getLogger(__name__)
@dataclass
class __lowerCamelCase :
a__: Optional[str] = field(
default='tab_fact' , metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
a__: Optional[str] = field(
default='tab_fact' , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} , )
a__: int = field(
default=1024 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
a__: bool = field(
default=lowerCAmelCase , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
a__: bool = field(
default=lowerCAmelCase , metadata={
'help': (
'Whether to pad all samples to `max_seq_length`. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
)
} , )
a__: Optional[int] = field(
default=lowerCAmelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
a__: Optional[int] = field(
default=lowerCAmelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
a__: Optional[int] = field(
default=lowerCAmelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of prediction examples to this '
'value if set.'
)
} , )
a__: Optional[str] = field(
default=lowerCAmelCase , metadata={'help': 'A csv or a json file containing the training data.'} )
a__: Optional[str] = field(
default=lowerCAmelCase , metadata={'help': 'A csv or a json file containing the validation data.'} )
a__: Optional[str] = field(default=lowerCAmelCase , metadata={'help': 'A csv or a json file containing the test data.'} )
def UpperCAmelCase__ ( self ):
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError('''Need either a GLUE task, a training/validation file or a dataset name.''' )
else:
lowerCamelCase_ = self.train_file.split('''.''' )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
lowerCamelCase_ = self.validation_file.split('''.''' )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class __lowerCamelCase :
a__: str = field(
default=lowerCAmelCase , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
a__: Optional[str] = field(
default=lowerCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
a__: Optional[str] = field(
default=lowerCAmelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
a__: Optional[str] = field(
default=lowerCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
a__: bool = field(
default=lowerCAmelCase , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
a__: str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
a__: bool = field(
default=lowerCAmelCase , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
def lowercase ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowerCamelCase_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' ,datefmt='''%m/%d/%Y %H:%M:%S''' ,handlers=[logging.StreamHandler(sys.stdout )] ,)
lowerCamelCase_ = training_args.get_process_log_level()
logger.setLevel(lowerCAmelCase__ )
datasets.utils.logging.set_verbosity(lowerCAmelCase__ )
transformers.utils.logging.set_verbosity(lowerCAmelCase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(f"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
lowerCamelCase_ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCamelCase_ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
lowerCamelCase_ = load_dataset(
data_args.dataset_name ,data_args.dataset_config_name ,cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
lowerCamelCase_ = {'''train''': data_args.train_file, '''validation''': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
lowerCamelCase_ = data_args.train_file.split('''.''' )[-1]
lowerCamelCase_ = data_args.test_file.split('''.''' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
lowerCamelCase_ = data_args.test_file
else:
raise ValueError('''Need either a GLUE task or a test file for `do_predict`.''' )
for key in data_files.keys():
logger.info(f"load a local file for {key}: {data_files[key]}" )
if data_args.train_file.endswith('''.csv''' ):
# Loading a dataset from local csv files
lowerCamelCase_ = load_dataset('''csv''' ,data_files=lowerCAmelCase__ ,cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
lowerCamelCase_ = load_dataset('''json''' ,data_files=lowerCAmelCase__ ,cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
lowerCamelCase_ = raw_datasets['''train'''].features['''label'''].names
lowerCamelCase_ = len(lowerCAmelCase__ )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCamelCase_ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path ,num_labels=lowerCAmelCase__ ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
# load tapex tokenizer
lowerCamelCase_ = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,use_fast=model_args.use_fast_tokenizer ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,add_prefix_space=lowerCAmelCase__ ,)
lowerCamelCase_ = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path ,from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) ,config=lowerCAmelCase__ ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
# Padding strategy
if data_args.pad_to_max_length:
lowerCamelCase_ = '''max_length'''
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
lowerCamelCase_ = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
lowerCamelCase_ = {'''Refused''': 0, '''Entailed''': 1}
lowerCamelCase_ = {0: '''Refused''', 1: '''Entailed'''}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
lowerCamelCase_ = min(data_args.max_seq_length ,tokenizer.model_max_length )
def preprocess_tabfact_function(lowerCAmelCase__ ):
# Tokenize the texts
def _convert_table_text_to_pandas(lowerCAmelCase__ ):
lowerCamelCase_ = [_table_row.split('''#''' ) for _table_row in _table_text.strip('''\n''' ).split('''\n''' )]
lowerCamelCase_ = pd.DataFrame.from_records(_table_content[1:] ,columns=_table_content[0] )
return _table_pd
lowerCamelCase_ = examples['''statement''']
lowerCamelCase_ = list(map(_convert_table_text_to_pandas ,examples['''table_text'''] ) )
lowerCamelCase_ = tokenizer(lowerCAmelCase__ ,lowerCAmelCase__ ,padding=lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,truncation=lowerCAmelCase__ )
lowerCamelCase_ = examples['''label''']
return result
with training_args.main_process_first(desc='''dataset map pre-processing''' ):
lowerCamelCase_ = raw_datasets.map(
lowerCAmelCase__ ,batched=lowerCAmelCase__ ,load_from_cache_file=not data_args.overwrite_cache ,desc='''Running tokenizer on dataset''' ,)
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
lowerCamelCase_ = raw_datasets['''train''']
if data_args.max_train_samples is not None:
lowerCamelCase_ = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
lowerCamelCase_ = raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
lowerCamelCase_ = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('''--do_predict requires a test dataset''' )
lowerCamelCase_ = raw_datasets['''test''']
if data_args.max_predict_samples is not None:
lowerCamelCase_ = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(lowerCAmelCase__ ) ) ,3 ):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}." )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(lowerCAmelCase__ ):
lowerCamelCase_ = p.predictions[0] if isinstance(p.predictions ,lowerCAmelCase__ ) else p.predictions
lowerCamelCase_ = np.argmax(lowerCAmelCase__ ,axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
lowerCamelCase_ = default_data_collator
elif training_args.fpaa:
lowerCamelCase_ = DataCollatorWithPadding(lowerCAmelCase__ ,pad_to_multiple_of=8 )
else:
lowerCamelCase_ = None
# Initialize our Trainer
lowerCamelCase_ = Trainer(
model=lowerCAmelCase__ ,args=lowerCAmelCase__ ,train_dataset=train_dataset if training_args.do_train else None ,eval_dataset=eval_dataset if training_args.do_eval else None ,compute_metrics=lowerCAmelCase__ ,tokenizer=lowerCAmelCase__ ,data_collator=lowerCAmelCase__ ,)
# Training
if training_args.do_train:
lowerCamelCase_ = None
if training_args.resume_from_checkpoint is not None:
lowerCamelCase_ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCamelCase_ = last_checkpoint
lowerCamelCase_ = trainer.train(resume_from_checkpoint=lowerCAmelCase__ )
lowerCamelCase_ = train_result.metrics
lowerCamelCase_ = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCAmelCase__ )
)
lowerCamelCase_ = min(lowerCAmelCase__ ,len(lowerCAmelCase__ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('''train''' ,lowerCAmelCase__ )
trainer.save_metrics('''train''' ,lowerCAmelCase__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowerCamelCase_ = trainer.evaluate(eval_dataset=lowerCAmelCase__ )
lowerCamelCase_ = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCAmelCase__ )
lowerCamelCase_ = min(lowerCAmelCase__ ,len(lowerCAmelCase__ ) )
trainer.log_metrics('''eval''' ,lowerCAmelCase__ )
trainer.save_metrics('''eval''' ,lowerCAmelCase__ )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
lowerCamelCase_ = predict_dataset.remove_columns('''label''' )
lowerCamelCase_ = trainer.predict(lowerCAmelCase__ ,metric_key_prefix='''predict''' ).predictions
lowerCamelCase_ = np.argmax(lowerCAmelCase__ ,axis=1 )
lowerCamelCase_ = os.path.join(training_args.output_dir ,'''predict_results_tabfact.txt''' )
if trainer.is_world_process_zero():
with open(lowerCAmelCase__ ,'''w''' ) as writer:
logger.info('''***** Predict Results *****''' )
writer.write('''index\tprediction\n''' )
for index, item in enumerate(lowerCAmelCase__ ):
lowerCamelCase_ = label_list[item]
writer.write(f"{index}\t{item}\n" )
lowerCamelCase_ = {'''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''text-classification'''}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCAmelCase__ )
else:
trainer.create_model_card(**lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
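# Illustrative invocation sketch (the script filename is a placeholder; the flag names mirror the
# dataclass fields referenced in main(), e.g. model_name_or_path, dataset_name, do_train):
#
#   python run_tabfact.py --model_name_or_path <hub-id-or-path> \
#       --dataset_name tab_fact --dataset_config_name tab_fact \
#       --do_train --do_eval --output_dir ./tabfact-output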
| 29 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''edbeeching/decision-transformer-gym-hopper-medium''': (
'''https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'''
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class _snake_case ( PretrainedConfig ):
    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, state_dim=17, act_dim=4, hidden_size=128, max_ep_len=4096, action_tanh=True, vocab_size=1, n_positions=1024, n_layer=3, n_head=1, n_inner=None, activation_function="relu", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, scale_attn_weights=True, use_cache=True, bos_token_id=50256, eos_token_id=50256, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False, **kwargs):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
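    # Illustrative note: with the defaults above the config describes a small GPT-2-style backbone
    # (n_layer=3, n_head=1, hidden_size=128) over 17-dimensional states and 4-dimensional actions,
    # e.g. `_snake_case(state_dim=17, act_dim=4)`.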
| 338 |
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class __lowerCamelCase ( unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' )
lowerCamelCase_ = AutoTokenizer.from_pretrained('''google/mt5-small''' )
lowerCamelCase_ = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids
lowerCamelCase_ = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids
lowerCamelCase_ = shift_tokens_right(UpperCAmelCase , model.config.pad_token_id , model.config.decoder_start_token_id )
lowerCamelCase_ = model(UpperCAmelCase , decoder_input_ids=UpperCAmelCase ).logits
lowerCamelCase_ = optax.softmax_cross_entropy(UpperCAmelCase , onehot(UpperCAmelCase , logits.shape[-1] ) ).mean()
lowerCamelCase_ = -(labels.shape[-1] * loss.item())
lowerCamelCase_ = -8_4.9_1_2_7
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
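        # The score above converts the mean per-token cross-entropy into a (negated)
        # total over the sequence (multiply by the label length, negate) before
        # comparing it against the fixed reference value EXPECTED_SCORE.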
| 29 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
__UpperCAmelCase = logging.get_logger(__name__)
class lowercase_ ( SequenceFeatureExtractor ):
    model_input_names = ['input_features', 'attention_mask']
def __init__( self : List[str] , _lowercase : List[str]=8_0 , _lowercase : Tuple=1_6_0_0_0 , _lowercase : Optional[int]=8_0 , _lowercase : Dict=0.0 , _lowercase : List[Any]=True , _lowercase : Optional[int]=True , _lowercase : int=True , **_lowercase : Optional[int] , ):
super().__init__(feature_size=_lowercase , sampling_rate=_lowercase , padding_value=_lowercase , **_lowercase )
lowerCAmelCase__ : Tuple = num_mel_bins
lowerCAmelCase__ : List[Any] = do_ceptral_normalize
lowerCAmelCase__ : Dict = normalize_means
lowerCAmelCase__ : str = normalize_vars
lowerCAmelCase__ : Tuple = True
def _lowerCAmelCase ( self : str , _lowercase : Union[str, Any] , ):
lowerCAmelCase__ : Optional[Any] = waveform * (2**1_5) # Kaldi compliance: 16-bit signed integers
lowerCAmelCase__ : List[Any] = torch.from_numpy(_lowercase ).unsqueeze(0 )
lowerCAmelCase__ : str = ta_kaldi.fbank(_lowercase , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate )
return features.numpy()
@staticmethod
def _lowerCAmelCase ( _lowercase : str , _lowercase : Optional[int] , _lowercase : Union[str, Any] = True , _lowercase : List[Any] = True , _lowercase : Optional[Any] = 0.0 , ):
# make sure we normalize float32 arrays
if normalize_means:
lowerCAmelCase__ : Tuple = x[:input_length].mean(axis=0 )
lowerCAmelCase__ : str = np.subtract(_lowercase , _lowercase )
if normalize_vars:
lowerCAmelCase__ : Optional[Any] = x[:input_length].std(axis=0 )
lowerCAmelCase__ : List[str] = np.divide(_lowercase , _lowercase )
if input_length < x.shape[0]:
lowerCAmelCase__ : Any = padding_value
# make sure array is in float32
lowerCAmelCase__ : Tuple = x.astype(np.floataa )
return x
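    # utterance_cmvn above applies per-utterance cepstral mean/variance normalization:
    # the mean (and optionally the standard deviation) is taken over the unpadded
    # frames only, and frames beyond input_length are reset to padding_value.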
def _lowerCAmelCase ( self : Tuple , _lowercase : Optional[int] , _lowercase : Optional[int] = None ):
lowerCAmelCase__ : List[str] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [
self.utterance_cmvn(_lowercase , _lowercase , self.normalize_means , self.normalize_vars , self.padding_value )
for x, n in zip(_lowercase , _lowercase )
]
def __call__( self : Optional[int] , _lowercase : Any , _lowercase : Dict = False , _lowercase : Optional[int] = None , _lowercase : str = False , _lowercase : List[Any] = None , _lowercase : Union[str, Any] = None , _lowercase : Tuple = None , _lowercase : Optional[Any] = None , **_lowercase : Optional[int] , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
f" {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
lowerCAmelCase__ : List[str] = isinstance(_lowercase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"Only mono-channel audio is supported for input to {self}" )
lowerCAmelCase__ : Dict = is_batched_numpy or (
isinstance(_lowercase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowerCAmelCase__ : int = [np.asarray(_lowercase , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(_lowercase , np.ndarray ):
lowerCAmelCase__ : List[Any] = np.asarray(_lowercase , dtype=np.floataa )
elif isinstance(_lowercase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowerCAmelCase__ : Tuple = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowerCAmelCase__ : Dict = [raw_speech]
# extract fbank features
lowerCAmelCase__ : Optional[int] = [self._extract_fbank_features(_lowercase ) for waveform in raw_speech]
# convert into correct format for padding
lowerCAmelCase__ : Any = BatchFeature({"input_features": features} )
lowerCAmelCase__ : Any = self.pad(
_lowercase , padding=_lowercase , max_length=_lowercase , truncation=_lowercase , pad_to_multiple_of=_lowercase , return_attention_mask=_lowercase , **_lowercase , )
# make sure list is in array format
lowerCAmelCase__ : int = padded_inputs.get("input_features" )
if isinstance(input_features[0] , _lowercase ):
lowerCAmelCase__ : List[Any] = [np.asarray(_lowercase , dtype=np.floataa ) for feature in input_features]
lowerCAmelCase__ : List[str] = padded_inputs.get("attention_mask" )
if attention_mask is not None:
lowerCAmelCase__ : List[Any] = [np.asarray(_lowercase , dtype=np.intaa ) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
lowerCAmelCase__ : List[str] = (
np.array(_lowercase , dtype=np.intaa )
if self._get_padding_strategies(_lowercase , max_length=_lowercase ) is not PaddingStrategy.DO_NOT_PAD
else None
)
lowerCAmelCase__ : Union[str, Any] = self.normalize(
padded_inputs["input_features"] , attention_mask=_lowercase )
if return_tensors is not None:
lowerCAmelCase__ : Union[str, Any] = padded_inputs.convert_to_tensors(_lowercase )
return padded_inputs
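# Illustrative note on __call__ above: given a 16 kHz mono waveform (or a batch of
# them), it extracts Kaldi-style fbank features, pads them into a BatchFeature with
# "input_features" and, when requested, an "attention_mask", and applies the
# per-utterance CMVN when do_ceptral_normalize is enabled.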
| 308 |
"""simple docstring"""
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = generate_pascal_triangle(lowerCAmelCase__ )
for row_idx in range(lowerCAmelCase__ ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=''' ''' )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] ,end=''' ''' )
else:
print(triangle[row_idx][col_idx] ,end='''''' )
print()
def lowercase ( lowerCAmelCase__ ):
if not isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
raise TypeError('''The input value of \'num_rows\' should be \'int\'''' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'''The input value of \'num_rows\' should be greater than or equal to 0''' )
lowerCamelCase_ = []
for current_row_idx in range(lowerCAmelCase__ ):
lowerCamelCase_ = populate_current_row(lowerCAmelCase__ ,lowerCAmelCase__ )
triangle.append(lowerCAmelCase__ )
return triangle
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
lowerCamelCase_ , lowerCamelCase_ = 1, 1
for current_col_idx in range(1 ,lowerCAmelCase__ ):
calculate_current_element(
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
return current_row
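# The helper below is meant to fill one interior cell using Pascal's rule,
#   C(n, k) = C(n - 1, k - 1) + C(n - 1, k),
# i.e. each entry is the sum of the two entries directly above it.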
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,):
lowerCamelCase_ = triangle[current_row_idx - 1][current_col_idx - 1]
lowerCamelCase_ = triangle[current_row_idx - 1][current_col_idx]
lowerCamelCase_ = above_to_left_elt + above_to_right_elt
def lowercase ( lowerCAmelCase__ ):
if not isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
raise TypeError('''The input value of \'num_rows\' should be \'int\'''' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'''The input value of \'num_rows\' should be greater than or equal to 0''' )
lowerCamelCase_ = [[1]]
for row_index in range(1 ,lowerCAmelCase__ ):
lowerCamelCase_ = [0] + result[-1] + [0]
lowerCamelCase_ = row_index + 1
# Calculate the number of distinct elements in a row
lowerCamelCase_ = sum(divmod(lowerCAmelCase__ ,2 ) )
lowerCamelCase_ = [
temp_row[i - 1] + temp_row[i] for i in range(1 ,distinct_elements + 1 )
]
lowerCamelCase_ = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
lowerCamelCase_ = row_first_half + row_second_half
result.append(lowerCAmelCase__ )
return result
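# The optimized variant above exploits the symmetry of each Pascal row: only the first
# ceil((row_index + 1) / 2) entries are computed and a mirrored copy of the first half
# is appended, roughly halving the work per row.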
def lowercase ( ):
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(lowerCAmelCase__ ,lowerCAmelCase__ ) -> None:
lowerCamelCase_ = f"{func.__name__}({value})"
lowerCamelCase_ = timeit(f"__main__.{call}" ,setup='''import __main__''' )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(f"{call:38} -- {timing:.4f} seconds" )
for value in range(15 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(lowerCAmelCase__ ,lowerCAmelCase__ )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 29 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE : Tuple = {
"configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
"tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Tuple = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Optional[int] = [
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Dict = [
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : str = [
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
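# With the lazy module in place, importing e.g. RoFormerModel through this package only
# loads the heavy modeling file on first attribute access, while the TYPE_CHECKING
# branch above keeps static analyzers aware of the full public API.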
| 89 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class __lowerCamelCase ( unittest.TestCase ):
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
lowerCamelCase_ = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
'''image_std''': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
lowerCamelCase_ = os.path.join(self.tmpdirname , UpperCAmelCase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
return BertTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
return BertTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def UpperCAmelCase__ ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCamelCase_ = [Image.fromarray(np.moveaxis(UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = self.get_rust_tokenizer()
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
processor_slow.save_pretrained(self.tmpdirname )
lowerCamelCase_ = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCAmelCase )
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
processor_fast.save_pretrained(self.tmpdirname )
lowerCamelCase_ = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , UpperCAmelCase )
self.assertIsInstance(processor_fast.tokenizer , UpperCAmelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , UpperCAmelCase )
self.assertIsInstance(processor_fast.image_processor , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase_ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
lowerCamelCase_ = self.get_image_processor(do_normalize=UpperCAmelCase , padding_value=1.0 )
lowerCamelCase_ = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = image_processor(UpperCAmelCase , return_tensors='''np''' )
lowerCamelCase_ = processor(images=UpperCAmelCase , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = '''lower newer'''
lowerCamelCase_ = processor(text=UpperCAmelCase )
lowerCamelCase_ = tokenizer(UpperCAmelCase , padding='''max_length''' , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = '''lower newer'''
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = processor(text=UpperCAmelCase , images=UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase ):
processor()
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase_ = processor.batch_decode(UpperCAmelCase )
lowerCamelCase_ = tokenizer.batch_decode(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = '''lower newer'''
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = processor(text=UpperCAmelCase , images=UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 29 | 0 |
"""simple docstring"""
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True)
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
if hor == 128:
lowerCAmelCase__ = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""")
lowerCAmelCase__ = (32, 128, 256)
lowerCAmelCase__ = ("""UpResnetBlock1D""", """UpResnetBlock1D""")
elif hor == 32:
lowerCAmelCase__ = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""")
lowerCAmelCase__ = (32, 64, 128, 256)
lowerCAmelCase__ = ("""UpResnetBlock1D""", """UpResnetBlock1D""", """UpResnetBlock1D""")
lowerCAmelCase__ = torch.load(f"""/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch""" )
lowerCAmelCase__ = model.state_dict()
lowerCAmelCase__ = {
"""down_block_types""": down_block_types,
"""block_out_channels""": block_out_channels,
"""up_block_types""": up_block_types,
"""layers_per_block""": 1,
"""use_timestep_embedding""": True,
"""out_block_type""": """OutConv1DBlock""",
"""norm_num_groups""": 8,
"""downsample_each_block""": False,
"""in_channels""": 14,
"""out_channels""": 14,
"""extra_in_channels""": 0,
"""time_embedding_type""": """positional""",
"""flip_sin_to_cos""": False,
"""freq_shift""": 1,
"""sample_size""": 6_5536,
"""mid_block_type""": """MidResTemporalBlock1D""",
"""act_fn""": """mish""",
}
lowerCAmelCase__ = UNetaDModel(**lowerCAmelCase__ )
print(f"""length of state dict: {len(state_dict.keys() )}""" )
print(f"""length of value function dict: {len(hf_value_function.state_dict().keys() )}""" )
lowerCAmelCase__ = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
lowerCAmelCase__ = state_dict.pop(lowerCAmelCase__ )
hf_value_function.load_state_dict(lowerCAmelCase__ )
torch.save(hf_value_function.state_dict() , f"""hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin""" )
with open(f"""hub/hopper-medium-v2/unet/hor{hor}/config.json""" , """w""" ) as f:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
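# The remapping above relies on both state dicts preserving the same parameter order,
# so zipping their key sequences gives a positional old-name -> new-name map without
# listing the keys explicitly; value_function() below reuses the same trick.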
def _UpperCAmelCase ( ):
"""simple docstring"""
lowerCAmelCase__ = {
"""in_channels""": 14,
"""down_block_types""": ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D"""),
"""up_block_types""": (),
"""out_block_type""": """ValueFunction""",
"""mid_block_type""": """ValueFunctionMidBlock1D""",
"""block_out_channels""": (32, 64, 128, 256),
"""layers_per_block""": 1,
"""downsample_each_block""": True,
"""sample_size""": 6_5536,
"""out_channels""": 14,
"""extra_in_channels""": 0,
"""time_embedding_type""": """positional""",
"""use_timestep_embedding""": True,
"""flip_sin_to_cos""": False,
"""freq_shift""": 1,
"""norm_num_groups""": 8,
"""act_fn""": """mish""",
}
lowerCAmelCase__ = torch.load("""/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch""" )
lowerCAmelCase__ = model
lowerCAmelCase__ = UNetaDModel(**lowerCAmelCase__ )
print(f"""length of state dict: {len(state_dict.keys() )}""" )
print(f"""length of value function dict: {len(hf_value_function.state_dict().keys() )}""" )
lowerCAmelCase__ = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
lowerCAmelCase__ = state_dict.pop(lowerCAmelCase__ )
hf_value_function.load_state_dict(lowerCAmelCase__ )
torch.save(hf_value_function.state_dict() , """hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin""" )
with open("""hub/hopper-medium-v2/value_function/config.json""" , """w""" ) as f:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
| 644 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2]
lowerCamelCase_ = True if '''large''' in model_name or '''huge''' in model_name else False
lowerCamelCase_ = True if '''large''' in model_name or '''huge''' in model_name else False
lowerCamelCase_ = True if '''large''' in model_name or '''huge''' in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
lowerCamelCase_ = [3, 3, 3, 3]
lowerCamelCase_ = [5, 5, 5, 5]
elif "fl4" in model_name:
lowerCamelCase_ = [4, 4, 4, 4]
lowerCamelCase_ = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
lowerCamelCase_ = [3, 3, 3, 3]
if "lrf" in model_name:
lowerCamelCase_ = [3, 3, 3, 3]
else:
lowerCamelCase_ = [2, 2, 2, 2]
if "tiny" in model_name:
lowerCamelCase_ = 96
elif "small" in model_name:
lowerCamelCase_ = 96
elif "base" in model_name:
lowerCamelCase_ = 128
elif "large" in model_name:
lowerCamelCase_ = 192
elif "xlarge" in model_name:
lowerCamelCase_ = 256
elif "huge" in model_name:
lowerCamelCase_ = 352
# set label information
lowerCamelCase_ = '''huggingface/label-files'''
if "large" in model_name or "huge" in model_name:
lowerCamelCase_ = '''imagenet-22k-id2label.json'''
else:
lowerCamelCase_ = '''imagenet-1k-id2label.json'''
lowerCamelCase_ = json.load(open(hf_hub_download(lowerCAmelCase__ ,lowerCAmelCase__ ,repo_type='''dataset''' ) ,'''r''' ) )
lowerCamelCase_ = {int(lowerCAmelCase__ ): v for k, v in idalabel.items()}
lowerCamelCase_ = {v: k for k, v in idalabel.items()}
lowerCamelCase_ = FocalNetConfig(
embed_dim=lowerCAmelCase__ ,depths=lowerCAmelCase__ ,focal_levels=lowerCAmelCase__ ,focal_windows=lowerCAmelCase__ ,use_conv_embed=lowerCAmelCase__ ,idalabel=lowerCAmelCase__ ,labelaid=lowerCAmelCase__ ,use_post_layernorm=lowerCAmelCase__ ,use_layerscale=lowerCAmelCase__ ,)
return config
def lowercase ( lowerCAmelCase__ ):
if "patch_embed.proj" in name:
lowerCamelCase_ = name.replace('''patch_embed.proj''' ,'''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
lowerCamelCase_ = name.replace('''patch_embed.norm''' ,'''embeddings.norm''' )
if "layers" in name:
lowerCamelCase_ = '''encoder.''' + name
if "encoder.layers" in name:
lowerCamelCase_ = name.replace('''encoder.layers''' ,'''encoder.stages''' )
if "downsample.proj" in name:
lowerCamelCase_ = name.replace('''downsample.proj''' ,'''downsample.projection''' )
if "blocks" in name:
lowerCamelCase_ = name.replace('''blocks''' ,'''layers''' )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
lowerCamelCase_ = name.replace('''modulation.f''' ,'''modulation.projection_in''' )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
lowerCamelCase_ = name.replace('''modulation.h''' ,'''modulation.projection_context''' )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
lowerCamelCase_ = name.replace('''modulation.proj''' ,'''modulation.projection_out''' )
if name == "norm.weight":
lowerCamelCase_ = '''layernorm.weight'''
if name == "norm.bias":
lowerCamelCase_ = '''layernorm.bias'''
if "head" in name:
lowerCamelCase_ = name.replace('''head''' ,'''classifier''' )
else:
lowerCamelCase_ = '''focalnet.''' + name
return name
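# For intuition, the intended effect of the renaming above on a typical checkpoint key
# (hypothetical example) is:
#   "layers.0.blocks.1.modulation.f.weight"
#       -> "focalnet.encoder.stages.0.layers.1.modulation.projection_in.weight"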
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=False ):
# fmt: off
lowerCamelCase_ = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
lowerCamelCase_ = model_name_to_url[model_name]
print('''Checkpoint URL: ''' ,lowerCAmelCase__ )
lowerCamelCase_ = torch.hub.load_state_dict_from_url(lowerCAmelCase__ ,map_location='''cpu''' )['''model''']
# rename keys
for key in state_dict.copy().keys():
lowerCamelCase_ = state_dict.pop(lowerCAmelCase__ )
lowerCamelCase_ = val
lowerCamelCase_ = get_focalnet_config(lowerCAmelCase__ )
lowerCamelCase_ = FocalNetForImageClassification(lowerCAmelCase__ )
model.eval()
# load state dict
model.load_state_dict(lowerCAmelCase__ )
# verify conversion
lowerCamelCase_ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCamelCase_ = BitImageProcessor(
do_resize=lowerCAmelCase__ ,size={'''shortest_edge''': 256} ,resample=PILImageResampling.BILINEAR ,do_center_crop=lowerCAmelCase__ ,crop_size=224 ,do_normalize=lowerCAmelCase__ ,image_mean=lowerCAmelCase__ ,image_std=lowerCAmelCase__ ,)
lowerCamelCase_ = Image.open(requests.get(lowerCAmelCase__ ,stream=lowerCAmelCase__ ).raw )
lowerCamelCase_ = processor(images=lowerCAmelCase__ ,return_tensors='''pt''' )
lowerCamelCase_ = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406] ,std=[0.229, 0.224, 0.225] ),
] )
lowerCamelCase_ = image_transforms(lowerCAmelCase__ ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values ,lowerCAmelCase__ ,atol=1E-4 )
lowerCamelCase_ = model(**lowerCAmelCase__ )
lowerCamelCase_ = outputs.logits.argmax(-1 ).item()
print('''Predicted class:''' ,model.config.idalabel[predicted_class_idx] )
print('''First values of logits:''' ,outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
lowerCamelCase_ = torch.tensor([0.2_166, -0.4_368, 0.2_191] )
elif model_name == "focalnet-tiny-lrf":
lowerCamelCase_ = torch.tensor([1.1_669, 0.0_125, -0.1_695] )
elif model_name == "focalnet-small":
lowerCamelCase_ = torch.tensor([0.4_917, -0.0_430, 0.1_341] )
elif model_name == "focalnet-small-lrf":
lowerCamelCase_ = torch.tensor([-0.2_588, -0.5_342, -0.2_331] )
elif model_name == "focalnet-base":
lowerCamelCase_ = torch.tensor([-0.1_655, -0.4_090, -0.1_730] )
elif model_name == "focalnet-base-lrf":
lowerCamelCase_ = torch.tensor([0.5_306, -0.0_483, -0.3_928] )
assert torch.allclose(outputs.logits[0, :3] ,lowerCAmelCase__ ,atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(lowerCAmelCase__ )
processor.save_pretrained(lowerCAmelCase__ )
if push_to_hub:
print(f"Pushing model and processor of {model_name} to the hub..." )
model.push_to_hub(f"{model_name}" )
processor.push_to_hub(f"{model_name}" )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""focalnet-tiny""",
type=str,
help="""Name of the FocalNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub.""",
)
A_ = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 29 | 0 |
"""simple docstring"""
import torch
from transformers import AutoModel
class __a ( torch.nn.Module ):
"""simple docstring"""
def __init__( self , snake_case="sayef/fsner-bert-base-uncased" ):
"""simple docstring"""
super(snake_case , self ).__init__()
lowerCAmelCase__ : List[str] = AutoModel.from_pretrained(snake_case , return_dict=snake_case )
lowerCAmelCase__ : Tuple = torch.nn.CosineSimilarity(3 , 1e-08 )
lowerCAmelCase__ : Tuple = torch.nn.Softmax(dim=1 )
def SCREAMING_SNAKE_CASE_ ( self , **snake_case ):
"""simple docstring"""
return self.bert(**snake_case ).last_hidden_state
def SCREAMING_SNAKE_CASE_ ( self , snake_case ):
"""simple docstring"""
return token_embeddings.sum(2 , keepdim=snake_case )
def SCREAMING_SNAKE_CASE_ ( self , snake_case , snake_case , snake_case=1 ):
"""simple docstring"""
return self.softmax(T * self.cos(snake_case , snake_case ) )
def SCREAMING_SNAKE_CASE_ ( self , snake_case , snake_case ):
"""simple docstring"""
lowerCAmelCase__ : Tuple = W_supports["sizes"].tolist()
lowerCAmelCase__ : int = W_supports["start_token_id"].item()
lowerCAmelCase__ : List[str] = W_supports["end_token_id"].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
lowerCAmelCase__ : int = self.BERT(**snake_case )
lowerCAmelCase__ : int = self.BERT(**snake_case )
lowerCAmelCase__ : List[Any] = None
lowerCAmelCase__ : str = None
lowerCAmelCase__ : Optional[int] = W_supports["input_ids"] == start_token_id
lowerCAmelCase__ : int = W_supports["input_ids"] == end_token_id
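        # Each query i is compared against the start/end marker embeddings of its own
        # support slice: similarities are summed over the support tokens and softmaxed
        # over the query positions, giving per-position start/end probabilities.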
for i, size in enumerate(snake_case ):
if i == 0:
lowerCAmelCase__ : List[Any] = 0
else:
lowerCAmelCase__ : Dict = support_sizes[i - 1]
lowerCAmelCase__ : int = S[s : s + size][start_token_masks[s : s + size]]
lowerCAmelCase__ : Dict = S[s : s + size][end_token_masks[s : s + size]]
lowerCAmelCase__ : Optional[int] = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
lowerCAmelCase__ : Optional[Any] = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
lowerCAmelCase__ : Tuple = torch.vstack((p_starts, p_start) )
lowerCAmelCase__ : List[str] = torch.vstack((p_ends, p_end) )
else:
lowerCAmelCase__ : Tuple = p_start
lowerCAmelCase__ : Dict = p_end
return p_starts, p_ends
| 453 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __lowerCamelCase ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
def UpperCAmelCase__ ( self ):
super().setUp()
lowerCamelCase_ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''你''', '''好''', '''是''', '''谁''', '''a''', '''b''', '''c''', '''d''']
lowerCamelCase_ = {}
lowerCamelCase_ = {}
for i, value in enumerate(UpperCAmelCase ):
lowerCamelCase_ = i
lowerCamelCase_ = i
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_shape_file'''] )
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_pronunciation_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
with open(self.word_shape_file , '''w''' , encoding='''utf-8''' ) as word_shape_writer:
json.dump(UpperCAmelCase , UpperCAmelCase , ensure_ascii=UpperCAmelCase )
with open(self.word_pronunciation_file , '''w''' , encoding='''utf-8''' ) as word_pronunciation_writer:
json.dump(UpperCAmelCase , UpperCAmelCase , ensure_ascii=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
lowerCamelCase_ = tokenizer.tokenize('''你好[SEP]你是谁''' )
self.assertListEqual(UpperCAmelCase , ['''你''', '''好''', '''[SEP]''', '''你''', '''是''', '''谁'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(UpperCAmelCase ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(UpperCAmelCase ) , [5, 6, 2, 5, 7, 8] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
lowerCamelCase_ = {}
for i, token in enumerate(UpperCAmelCase ):
lowerCamelCase_ = i
lowerCamelCase_ = RoCBertWordpieceTokenizer(vocab=UpperCAmelCase , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def UpperCAmelCase__ ( self ):
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def UpperCAmelCase__ ( self ):
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def UpperCAmelCase__ ( self ):
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(UpperCAmelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
if self.test_rust_tokenizer:
lowerCamelCase_ = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(UpperCAmelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
def UpperCAmelCase__ ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
lowerCamelCase_ = tokenizer_r.encode_plus(
UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , add_special_tokens=UpperCAmelCase , )
lowerCamelCase_ = tokenizer_r.do_lower_case if hasattr(UpperCAmelCase , '''do_lower_case''' ) else False
lowerCamelCase_ = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = ['''的''', '''人''', '''有''']
lowerCamelCase_ = ''''''.join(UpperCAmelCase )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCamelCase_ = True
lowerCamelCase_ = self.tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = tokenizer_p.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer_r.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer_r.convert_ids_to_tokens(UpperCAmelCase )
lowerCamelCase_ = tokenizer_p.convert_ids_to_tokens(UpperCAmelCase )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = False
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = self.tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = tokenizer_r.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer_p.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer_r.convert_ids_to_tokens(UpperCAmelCase )
lowerCamelCase_ = tokenizer_p.convert_ids_to_tokens(UpperCAmelCase )
# it is expected that only the first Chinese character is not preceded by "##".
lowerCamelCase_ = [
f"##{token}" if idx != 0 else token for idx, token in enumerate(UpperCAmelCase )
]
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
@slow
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
lowerCamelCase_ = tokenizer.encode('''你好''' , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer.encode('''你是谁''' , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase )
lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase , UpperCAmelCase )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_tokenizers(do_lower_case=UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
lowerCamelCase_ = '''你好,你是谁'''
lowerCamelCase_ = tokenizer.tokenize(UpperCAmelCase )
lowerCamelCase_ = tokenizer.convert_tokens_to_ids(UpperCAmelCase )
lowerCamelCase_ = tokenizer.convert_tokens_to_shape_ids(UpperCAmelCase )
lowerCamelCase_ = tokenizer.convert_tokens_to_pronunciation_ids(UpperCAmelCase )
lowerCamelCase_ = tokenizer.prepare_for_model(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer.encode_plus(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
| 29 | 0 |
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class _A ( unittest.TestCase ):
    tokenizer_class = JukeboxTokenizer
    metas = {
'artist': 'Zac Brown Band',
'genres': 'Country',
'lyrics': 'I met a traveller from an antique land,\n Who said "Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ',
}
@require_torch
def UpperCAmelCase ( self ):
import torch
_UpperCAmelCase = JukeboxTokenizer.from_pretrained("""openai/jukebox-1b-lyrics""" )
_UpperCAmelCase = tokenizer(**self.metas )["""input_ids"""]
# fmt: off
_UpperCAmelCase = [
torch.tensor([[
0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
def UpperCAmelCase ( self ):
import torch
_UpperCAmelCase = JukeboxTokenizer.from_pretrained("""openai/jukebox-5b-lyrics""" )
_UpperCAmelCase = tokenizer(**self.metas )["""input_ids"""]
# fmt: off
_UpperCAmelCase = [
torch.tensor([[
0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
        self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 518 |
"""simple docstring"""
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
A_ = datasets.logging.get_logger(__name__)
A_ = """\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",
author = \"Moosavi, Nafise Sadat and
Strube, Michael\",
booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",
month = aug,
year = \"2016\",
address = \"Berlin, Germany\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/P16-1060\",
doi = \"10.18653/v1/P16-1060\",
pages = \"632--642\",
}
"""
A_ = """\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only works with the CoNLL line format:
The CoNLL format has one word per line, with all the annotation for this word in columns separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identifies the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More information on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files was developed by Leo Born.
"""
A_ = """
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting 'keep_singletons=False', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
'mentions': mentions
'muc': MUC metric [Vilain et al, 1995]
'bcub': B-cubed [Bagga and Baldwin, 1998]
'ceafe': CEAFe [Luo et al., 2005]
'lea': LEA [Moosavi and Strube, 2016]
'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric('coval')
>>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',
... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',
... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',
... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',
... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',
... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{'mentions/recall': 1.0,[...] 'conll_score': 100.0}
"""
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=False ,lowerCAmelCase__=False ,lowerCAmelCase__=True ,lowerCAmelCase__=False ,lowerCAmelCase__="dummy_doc" ):
lowerCamelCase_ = {doc: key_lines}
lowerCamelCase_ = {doc: sys_lines}
lowerCamelCase_ = {}
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ , lowerCamelCase_ = reader.get_doc_mentions(lowerCAmelCase__ ,key_doc_lines[doc] ,lowerCAmelCase__ )
key_singletons_num += singletons_num
if NP_only or min_span:
lowerCamelCase_ = reader.set_annotated_parse_trees(lowerCAmelCase__ ,key_doc_lines[doc] ,lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCamelCase_ , lowerCamelCase_ = reader.get_doc_mentions(lowerCAmelCase__ ,sys_doc_lines[doc] ,lowerCAmelCase__ )
sys_singletons_num += singletons_num
if NP_only or min_span:
lowerCamelCase_ = reader.set_annotated_parse_trees(lowerCAmelCase__ ,key_doc_lines[doc] ,lowerCAmelCase__ ,lowerCAmelCase__ )
if remove_nested:
lowerCamelCase_ , lowerCamelCase_ = reader.remove_nested_coref_mentions(lowerCAmelCase__ ,lowerCAmelCase__ )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
lowerCamelCase_ , lowerCamelCase_ = reader.remove_nested_coref_mentions(lowerCAmelCase__ ,lowerCAmelCase__ )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
lowerCamelCase_ = reader.get_mention_assignments(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCamelCase_ = reader.get_mention_assignments(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCamelCase_ = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
'''Number of removed nested coreferring mentions in the key '''
f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}" )
logger.info(
'''Number of resulting singleton clusters in the key '''
f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}" )
if not keep_singletons:
logger.info(
f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
'''files, respectively''' )
return doc_coref_infos
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = get_coref_infos(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCamelCase_ = {}
lowerCamelCase_ = 0
lowerCamelCase_ = 0
for name, metric in metrics:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = evaluator.evaluate_documents(lowerCAmelCase__ ,lowerCAmelCase__ ,beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": fa} )
logger.info(
name.ljust(10 ) ,f"Recall: {recall * 100:.2f}" ,f" Precision: {precision * 100:.2f}" ,f" F1: {fa * 100:.2f}" ,)
if conll_subparts_num == 3:
lowerCamelCase_ = (conll / 3) * 100
logger.info(f"CoNLL score: {conll:.2f}" )
output_scores.update({'''conll_score''': conll} )
return output_scores
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = False
for line in key_lines:
if not line.startswith('''#''' ):
if len(line.split() ) > 6:
lowerCamelCase_ = line.split()[5]
if not parse_col == "-":
lowerCamelCase_ = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase ( datasets.Metric ):
def UpperCAmelCase__ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' ) ),
'''references''': datasets.Sequence(datasets.Value('''string''' ) ),
} ) , codebase_urls=['''https://github.com/ns-moosavi/coval'''] , reference_urls=[
'''https://github.com/ns-moosavi/coval''',
'''https://www.aclweb.org/anthology/P16-1060''',
'''http://www.conll.cemantix.org/2012/data.html''',
] , )
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=False ):
lowerCamelCase_ = [
('''mentions''', evaluator.mentions),
('''muc''', evaluator.muc),
('''bcub''', evaluator.b_cubed),
('''ceafe''', evaluator.ceafe),
('''lea''', evaluator.lea),
]
if min_span:
lowerCamelCase_ = util.check_gold_parse_annotation(UpperCAmelCase )
if not has_gold_parse:
raise NotImplementedError('''References should have gold parse annotation to use \'min_span\'.''' )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
lowerCamelCase_ = evaluate(
key_lines=UpperCAmelCase , sys_lines=UpperCAmelCase , metrics=UpperCAmelCase , NP_only=UpperCAmelCase , remove_nested=UpperCAmelCase , keep_singletons=UpperCAmelCase , min_span=UpperCAmelCase , )
return score
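# Hedged sketch (illustrative, not part of the metric above): how the averaged CoNLL score is
# derived from the MUC, B-cubed and CEAFe F1 values, mirroring the `conll += fa` /
# `(conll / 3) * 100` logic in the `evaluate` helper above. The sample F1 values are made up.
def _conll_average_sketch(muc_fa: float, bcub_fa: float, ceafe_fa: float) -> float:
    return (muc_fa + bcub_fa + ceafe_fa) / 3 * 100
# _conll_average_sketch(1.0, 1.0, 1.0) == 100.0, matching the `conll_score` in the doctest above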
| 29 | 0 |
import math
from collections.abc import Iterator
from itertools import takewhile
def __lowerCAmelCase ( __magic_name__ ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(lowerCAmelCase__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __lowerCAmelCase ( ):
_lowercase: List[str] = 2
while True:
if is_prime(lowerCAmelCase__ ):
yield num
num += 1
def __lowerCAmelCase ( __magic_name__ = 2_0_0_0_0_0_0 ):
    return sum(takewhile(lambda x : x < __magic_name__ , prime_generator() ) )
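# Hedged sanity check (added for illustration, not part of the original solution): the same
# takewhile pattern over a self-contained trial-division generator sums the primes below 10
# to 2 + 3 + 5 + 7 = 17.
def _solution_sanity_check() -> None:
    def _primes():
        candidate = 2
        while True:
            if all(candidate % p for p in range(2, int(math.sqrt(candidate)) + 1)):
                yield candidate
            candidate += 1
    assert sum(takewhile(lambda x: x < 10, _primes())) == 17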
if __name__ == "__main__":
print(f'''{solution() = }''')
| 226 |
"""simple docstring"""
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class __lowerCamelCase ( unittest.TestCase ):
@parameterized.expand([(None,), ('''foo.json''',)] )
def UpperCAmelCase__ ( self , UpperCAmelCase ):
lowerCamelCase_ = GenerationConfig(
do_sample=UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCAmelCase , config_name=UpperCAmelCase )
lowerCamelCase_ = GenerationConfig.from_pretrained(UpperCAmelCase , config_name=UpperCAmelCase )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , UpperCAmelCase )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = AutoConfig.from_pretrained('''gpt2''' )
lowerCamelCase_ = GenerationConfig.from_model_config(UpperCAmelCase )
lowerCamelCase_ = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(UpperCAmelCase , UpperCAmelCase )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig()
lowerCamelCase_ = {
'''max_new_tokens''': 1024,
'''foo''': '''bar''',
}
lowerCamelCase_ = copy.deepcopy(UpperCAmelCase )
lowerCamelCase_ = generation_config.update(**UpperCAmelCase )
# update_kwargs was not modified (no side effects)
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(UpperCAmelCase , {'''foo''': '''bar'''} )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig()
lowerCamelCase_ = '''bar'''
with tempfile.TemporaryDirectory('''test-generation-config''' ) as tmp_dir:
generation_config.save_pretrained(UpperCAmelCase )
lowerCamelCase_ = GenerationConfig.from_pretrained(UpperCAmelCase )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , '''bar''' )
lowerCamelCase_ = GenerationConfig.from_model_config(UpperCAmelCase )
assert not hasattr(UpperCAmelCase , '''foo''' ) # no new kwargs should be initialized if from config
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , UpperCAmelCase )
self.assertEqual(default_config.num_beams , 1 )
lowerCamelCase_ = GenerationConfig(
do_sample=UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , UpperCAmelCase )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCAmelCase )
lowerCamelCase_ = GenerationConfig.from_pretrained(UpperCAmelCase , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , UpperCAmelCase )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class __lowerCamelCase ( unittest.TestCase ):
@classmethod
def UpperCAmelCase__ ( cls ):
lowerCamelCase_ = TOKEN
HfFolder.save_token(UpperCAmelCase )
@classmethod
def UpperCAmelCase__ ( cls ):
try:
delete_repo(token=cls._token , repo_id='''test-generation-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-generation-config-org''' )
except HTTPError:
pass
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig(
do_sample=UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''test-generation-config''' , use_auth_token=self._token )
lowerCamelCase_ = GenerationConfig.from_pretrained(f"{USER}/test-generation-config" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-generation-config''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
UpperCAmelCase , repo_id='''test-generation-config''' , push_to_hub=UpperCAmelCase , use_auth_token=self._token )
lowerCamelCase_ = GenerationConfig.from_pretrained(f"{USER}/test-generation-config" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig(
do_sample=UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''valid_org/test-generation-config-org''' , use_auth_token=self._token )
lowerCamelCase_ = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-generation-config-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
UpperCAmelCase , repo_id='''valid_org/test-generation-config-org''' , push_to_hub=UpperCAmelCase , use_auth_token=self._token )
lowerCamelCase_ = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
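# Hedged usage sketch (illustrative, mirrors the `update` test above): `GenerationConfig.update`
# applies valid keys in place and returns the unknown keys unused.
def _generation_config_update_sketch() -> None:
    config = GenerationConfig()
    unused = config.update(max_new_tokens=1024, foo="bar")
    assert config.max_new_tokens == 1024
    assert unused == {"foo": "bar"}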
| 29 | 0 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
_UpperCAmelCase : Optional[int] = logging.getLogger(__name__)
@dataclass
class lowerCAmelCase_ :
UpperCamelCase_ :Optional[str] = field(
default='tab_fact' , metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
UpperCamelCase_ :Optional[str] = field(
default='tab_fact' , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} , )
UpperCamelCase_ :int = field(
default=1024 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
UpperCamelCase_ :bool = field(
default=snake_case__ , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
UpperCamelCase_ :bool = field(
default=snake_case__ , metadata={
'help': (
'Whether to pad all samples to `max_seq_length`. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
)
} , )
UpperCamelCase_ :Optional[int] = field(
default=snake_case__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
UpperCamelCase_ :Optional[int] = field(
default=snake_case__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
UpperCamelCase_ :Optional[int] = field(
default=snake_case__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of prediction examples to this '
'value if set.'
)
} , )
UpperCamelCase_ :Optional[str] = field(
default=snake_case__ , metadata={'help': 'A csv or a json file containing the training data.'} )
UpperCamelCase_ :Optional[str] = field(
default=snake_case__ , metadata={'help': 'A csv or a json file containing the validation data.'} )
UpperCamelCase_ :Optional[str] = field(default=snake_case__ , metadata={'help': 'A csv or a json file containing the test data.'} )
def __snake_case ( self : Optional[Any] ):
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError('''Need either a GLUE task, a training/validation file or a dataset name.''' )
else:
lowerCAmelCase__ = self.train_file.split('''.''' )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
lowerCAmelCase__ = self.validation_file.split('''.''' )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class lowerCAmelCase_ :
UpperCamelCase_ :str = field(
default=snake_case__ , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
UpperCamelCase_ :Optional[str] = field(
default=snake_case__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
UpperCamelCase_ :Optional[str] = field(
default=snake_case__ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
UpperCamelCase_ :Optional[str] = field(
default=snake_case__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
UpperCamelCase_ :bool = field(
default=snake_case__ , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
UpperCamelCase_ :str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
UpperCamelCase_ :bool = field(
default=snake_case__ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
def lowerCAmelCase_ () -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
lowerCAmelCase__ = training_args.get_process_log_level()
logger.setLevel(lowerCAmelCase__ )
datasets.utils.logging.set_verbosity(lowerCAmelCase__ )
transformers.utils.logging.set_verbosity(lowerCAmelCase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
lowerCAmelCase__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCAmelCase__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
lowerCAmelCase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
lowerCAmelCase__ = {'''train''': data_args.train_file, '''validation''': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
lowerCAmelCase__ = data_args.train_file.split('''.''' )[-1]
lowerCAmelCase__ = data_args.test_file.split('''.''' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
lowerCAmelCase__ = data_args.test_file
else:
raise ValueError('''Need either a GLUE task or a test file for `do_predict`.''' )
for key in data_files.keys():
logger.info(f'load a local file for {key}: {data_files[key]}' )
if data_args.train_file.endswith('''.csv''' ):
# Loading a dataset from local csv files
lowerCAmelCase__ = load_dataset('''csv''' , data_files=lowerCAmelCase__ , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
lowerCAmelCase__ = load_dataset('''json''' , data_files=lowerCAmelCase__ , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
lowerCAmelCase__ = raw_datasets['''train'''].features['''label'''].names
lowerCAmelCase__ = len(lowerCAmelCase__ )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCAmelCase__ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowerCAmelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
lowerCAmelCase__ = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=lowerCAmelCase__ , )
lowerCAmelCase__ = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=lowerCAmelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
lowerCAmelCase__ = '''max_length'''
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
lowerCAmelCase__ = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
lowerCAmelCase__ = {'''Refused''': 0, '''Entailed''': 1}
lowerCAmelCase__ = {0: '''Refused''', 1: '''Entailed'''}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'
f'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' )
lowerCAmelCase__ = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(lowercase__ : Any ):
# Tokenize the texts
def _convert_table_text_to_pandas(lowercase__ : Any ):
lowerCAmelCase__ = [_table_row.split('''#''' ) for _table_row in _table_text.strip('''\n''' ).split('''\n''' )]
lowerCAmelCase__ = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
lowerCAmelCase__ = examples['''statement''']
lowerCAmelCase__ = list(map(_convert_table_text_to_pandas , examples['''table_text'''] ) )
lowerCAmelCase__ = tokenizer(lowerCAmelCase__ , lowerCAmelCase__ , padding=lowerCAmelCase__ , max_length=lowerCAmelCase__ , truncation=lowerCAmelCase__ )
lowerCAmelCase__ = examples['''label''']
return result
with training_args.main_process_first(desc='''dataset map pre-processing''' ):
lowerCAmelCase__ = raw_datasets.map(
lowerCAmelCase__ , batched=lowerCAmelCase__ , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on dataset''' , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
lowerCAmelCase__ = raw_datasets['''train''']
if data_args.max_train_samples is not None:
lowerCAmelCase__ = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
lowerCAmelCase__ = raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
lowerCAmelCase__ = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('''--do_predict requires a test dataset''' )
lowerCAmelCase__ = raw_datasets['''test''']
if data_args.max_predict_samples is not None:
lowerCAmelCase__ = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(lowerCAmelCase__ ) ) , 3 ):
logger.info(f'Sample {index} of the training set: {train_dataset[index]}.' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(lowercase__ : Any ):
lowerCAmelCase__ = p.predictions[0] if isinstance(p.predictions , lowerCAmelCase__ ) else p.predictions
lowerCAmelCase__ = np.argmax(lowerCAmelCase__ , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
lowerCAmelCase__ = default_data_collator
elif training_args.fpaa:
lowerCAmelCase__ = DataCollatorWithPadding(lowerCAmelCase__ , pad_to_multiple_of=8 )
else:
lowerCAmelCase__ = None
# Initialize our Trainer
lowerCAmelCase__ = Trainer(
model=lowerCAmelCase__ , args=lowerCAmelCase__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , data_collator=lowerCAmelCase__ , )
# Training
if training_args.do_train:
lowerCAmelCase__ = None
if training_args.resume_from_checkpoint is not None:
lowerCAmelCase__ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCAmelCase__ = last_checkpoint
lowerCAmelCase__ = trainer.train(resume_from_checkpoint=lowerCAmelCase__ )
lowerCAmelCase__ = train_result.metrics
lowerCAmelCase__ = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCAmelCase__ )
)
lowerCAmelCase__ = min(lowerCAmelCase__ , len(lowerCAmelCase__ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('''train''' , lowerCAmelCase__ )
trainer.save_metrics('''train''' , lowerCAmelCase__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowerCAmelCase__ = trainer.evaluate(eval_dataset=lowerCAmelCase__ )
lowerCAmelCase__ = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCAmelCase__ )
lowerCAmelCase__ = min(lowerCAmelCase__ , len(lowerCAmelCase__ ) )
trainer.log_metrics('''eval''' , lowerCAmelCase__ )
trainer.save_metrics('''eval''' , lowerCAmelCase__ )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
lowerCAmelCase__ = predict_dataset.remove_columns('''label''' )
lowerCAmelCase__ = trainer.predict(lowerCAmelCase__ , metric_key_prefix='''predict''' ).predictions
lowerCAmelCase__ = np.argmax(lowerCAmelCase__ , axis=1 )
lowerCAmelCase__ = os.path.join(training_args.output_dir , '''predict_results_tabfact.txt''' )
if trainer.is_world_process_zero():
with open(lowerCAmelCase__ , '''w''' ) as writer:
logger.info('''***** Predict Results *****''' )
writer.write('''index\tprediction\n''' )
for index, item in enumerate(lowerCAmelCase__ ):
lowerCAmelCase__ = label_list[item]
writer.write(f'{index}\t{item}\n' )
lowerCAmelCase__ = {'''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''text-classification'''}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCAmelCase__ )
else:
trainer.create_model_card(**lowerCAmelCase__ )
def lowerCAmelCase_ (lowercase__ : List[Any] ) -> Optional[Any]:
'''simple docstring'''
main()
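# Hedged sketch (illustrative only) of the `_convert_table_text_to_pandas` helper defined inside
# `preprocess_tabfact_function` above: TabFact table strings separate rows with '\n' and cells
# with '#', and the first row is treated as the header. The sample table below is made up.
def _table_text_to_pandas_sketch(table_text: str) -> pd.DataFrame:
    rows = [row.split("#") for row in table_text.strip("\n").split("\n")]
    return pd.DataFrame.from_records(rows[1:], columns=rows[0])
# _table_text_to_pandas_sketch("year#city\n2016#Rio\n2021#Tokyo") -> 2x2 frame with columns ['year', 'city']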
if __name__ == "__main__":
main()
| 668 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __lowerCamelCase :
a__: List[str]
a__: Optional[str] = None
# Automatically constructed
a__: ClassVar[str] = "dict"
a__: ClassVar[Any] = None
a__: str = field(default='Translation' , init=lowerCAmelCase , repr=lowerCAmelCase )
def __call__( self ):
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def UpperCAmelCase__ ( self ):
from .features import Value
return {k: Value('''string''' ) for k in sorted(self.languages )}
@dataclass
class __lowerCamelCase :
a__: Optional[List] = None
a__: Optional[int] = None
a__: Optional[str] = None
# Automatically constructed
a__: ClassVar[str] = "dict"
a__: ClassVar[Any] = None
a__: str = field(default='TranslationVariableLanguages' , init=lowerCAmelCase , repr=lowerCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = sorted(set(self.languages ) ) if self.languages else None
lowerCamelCase_ = len(self.languages ) if self.languages else None
def __call__( self ):
return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} )
def UpperCAmelCase__ ( self , UpperCAmelCase ):
lowerCamelCase_ = set(self.languages )
if self.languages and set(UpperCAmelCase ) - lang_set:
raise ValueError(
f"Some languages in example ({', '.join(sorted(set(UpperCAmelCase ) - lang_set ) )}) are not in valid set ({', '.join(UpperCAmelCase )})." )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
lowerCamelCase_ = []
for lang, text in translation_dict.items():
if isinstance(UpperCAmelCase , UpperCAmelCase ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
lowerCamelCase_ , lowerCamelCase_ = zip(*sorted(UpperCAmelCase ) )
return {"language": languages, "translation": translations}
def UpperCAmelCase__ ( self ):
from .features import Sequence, Value
return {
"language": Sequence(Value('''string''' ) ),
"translation": Sequence(Value('''string''' ) ),
}
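# Hedged sketch (illustrative only) of the flattening performed by the variable-language encoder
# above: a language -> text (or list of texts) mapping becomes two parallel, language-sorted lists.
# The example languages and texts are made up.
def _flatten_translation_sketch(translation_dict: dict) -> dict:
    pairs = []
    for lang, text in translation_dict.items():
        if isinstance(text, str):
            pairs.append((lang, text))
        else:
            pairs.extend((lang, el) for el in text)
    languages, translations = zip(*sorted(pairs))
    return {"language": list(languages), "translation": list(translations)}
# _flatten_translation_sketch({"de": "Hallo", "en": ["Hello", "Hi"]})
# -> {"language": ["de", "en", "en"], "translation": ["Hallo", "Hello", "Hi"]}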
| 29 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def __magic_name__ ( __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
if "model" in orig_key:
snake_case_ = orig_key.replace('''model.''', '''''' )
if "norm1" in orig_key:
snake_case_ = orig_key.replace('''norm1''', '''attention.output.LayerNorm''' )
if "norm2" in orig_key:
snake_case_ = orig_key.replace('''norm2''', '''output.LayerNorm''' )
if "norm" in orig_key:
snake_case_ = orig_key.replace('''norm''', '''LayerNorm''' )
if "transformer" in orig_key:
snake_case_ = orig_key.split('''.''' )[0].split('''_''' )[-1]
snake_case_ = orig_key.replace(F"transformer_{layer_num}", F"encoder.layer.{layer_num}" )
if "mha.attn" in orig_key:
snake_case_ = orig_key.replace('''mha.attn''', '''attention.self''' )
if "mha" in orig_key:
snake_case_ = orig_key.replace('''mha''', '''attention''' )
if "W_q" in orig_key:
snake_case_ = orig_key.replace('''W_q''', '''self.query''' )
if "W_k" in orig_key:
snake_case_ = orig_key.replace('''W_k''', '''self.key''' )
if "W_v" in orig_key:
snake_case_ = orig_key.replace('''W_v''', '''self.value''' )
if "ff1" in orig_key:
snake_case_ = orig_key.replace('''ff1''', '''intermediate.dense''' )
if "ff2" in orig_key:
snake_case_ = orig_key.replace('''ff2''', '''output.dense''' )
if "ff" in orig_key:
snake_case_ = orig_key.replace('''ff''', '''output.dense''' )
if "mlm_class" in orig_key:
snake_case_ = orig_key.replace('''mlm.mlm_class''', '''cls.predictions.decoder''' )
if "mlm" in orig_key:
snake_case_ = orig_key.replace('''mlm''', '''cls.predictions.transform''' )
if "cls" not in orig_key:
snake_case_ = '''yoso.''' + orig_key
return orig_key
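# Hedged trace of the rename rules above on two made-up checkpoint keys (assumptions chosen to
# exercise the visible `replace` chain, not keys read from a real YOSO checkpoint):
#   "model.transformer_0.mha.W_q.weight" -> "yoso.encoder.layer.0.attention.self.query.weight"
#   "model.mlm.mlm_class.weight"         -> "cls.predictions.decoder.weight"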
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase ) -> int:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
snake_case_ = orig_state_dict.pop(lowerCAmelCase__ )
if ("pooler" in key) or ("sen_class" in key):
continue
else:
snake_case_ = val
snake_case_ = orig_state_dict['''cls.predictions.decoder.bias''']
snake_case_ = torch.arange(lowerCAmelCase__ ).expand((1, -1) ) + 2
return orig_state_dict
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
snake_case_ = torch.load(lowerCAmelCase__, map_location='''cpu''' )['''model_state_dict''']
snake_case_ = YosoConfig.from_json_file(lowerCAmelCase__ )
snake_case_ = YosoForMaskedLM(lowerCAmelCase__ )
snake_case_ = convert_checkpoint_helper(config.max_position_embeddings, lowerCAmelCase__ )
print(model.load_state_dict(lowerCAmelCase__ ) )
model.eval()
model.save_pretrained(lowerCAmelCase__ )
print(F"Checkpoint successfuly converted. Model saved at {pytorch_dump_path}" )
if __name__ == "__main__":
a : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path', default=None, type=str, required=True, help='Path to YOSO pytorch checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for YOSO model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
a : int = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 640 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
A_ = {
"""configuration_gpt_neo""": ["""GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoConfig""", """GPTNeoOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"""GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoForCausalLM""",
"""GPTNeoForQuestionAnswering""",
"""GPTNeoForSequenceClassification""",
"""GPTNeoForTokenClassification""",
"""GPTNeoModel""",
"""GPTNeoPreTrainedModel""",
"""load_tf_weights_in_gpt_neo""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"""FlaxGPTNeoForCausalLM""",
"""FlaxGPTNeoModel""",
"""FlaxGPTNeoPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
A_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 29 | 0 |
'''simple docstring'''
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
lowerCamelCase_ = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class _UpperCAmelCase ( datasets.BuilderConfig ):
"""simple docstring"""
snake_case = None
def __lowercase ( __lowercase , __lowercase , ) -> Dict:
'''simple docstring'''
import pyspark
def generate_fn():
_A = df.select("*" , pyspark.sql.functions.spark_partition_id().alias("part_id" ) )
for partition_id in partition_order:
_A = df_with_partition_id.select("*" ).where(F'''part_id = {partition_id}''' ).drop("part_id" )
_A = partition_df.collect()
_A = 0
for row in rows:
yield F'''{partition_id}_{row_id}''', row.asDict()
row_id += 1
return generate_fn
class _UpperCAmelCase ( _BaseExamplesIterable ):
"""simple docstring"""
def __init__( self : Optional[Any] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[Any]=None , ):
'''simple docstring'''
_A = df
_A = partition_order or range(self.df.rdd.getNumPartitions() )
_A = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self : List[str] ):
'''simple docstring'''
yield from self.generate_examples_fn()
def lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : Union[str, Any] ):
'''simple docstring'''
_A = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(__UpperCAmelCase )
return SparkExamplesIterable(self.df , partition_order=__UpperCAmelCase )
def lowerCAmelCase ( self : Dict , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : List[str] ):
'''simple docstring'''
_A = self.split_shard_indices_by_worker(__UpperCAmelCase , __UpperCAmelCase )
return SparkExamplesIterable(self.df , partition_order=__UpperCAmelCase )
@property
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
return len(self.partition_order )
class _UpperCAmelCase ( datasets.DatasetBuilder ):
"""simple docstring"""
snake_case = SparkConfig
def __init__( self : List[Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : List[str] = None , __UpperCAmelCase : Optional[Any] = None , **__UpperCAmelCase : List[str] , ):
'''simple docstring'''
import pyspark
_A = pyspark.sql.SparkSession.builder.getOrCreate()
_A = df
_A = working_dir
super().__init__(
cache_dir=__UpperCAmelCase , config_name=str(self.df.semanticHash() ) , **__UpperCAmelCase , )
def lowerCAmelCase ( self : int ):
'''simple docstring'''
def create_cache_and_write_probe(__UpperCAmelCase : List[str] ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=__UpperCAmelCase )
_A = os.path.join(self._cache_dir , "fs_test" + uuid.uuida().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(__UpperCAmelCase , "a" )
return [probe_file]
if self._spark.conf.get("spark.master" , "" ).startswith("local" ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
_A = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(__UpperCAmelCase ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
"When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir" )
def lowerCAmelCase ( self : str ):
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : Dict ):
'''simple docstring'''
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : List[str] ):
'''simple docstring'''
import pyspark
def get_arrow_batch_size(__UpperCAmelCase : List[str] ):
for batch in it:
yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]} )
_A = self.df.count()
_A = df_num_rows if df_num_rows <= 100 else 100
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
_A = (
self.df.limit(__UpperCAmelCase )
.repartition(1 )
.mapInArrow(__UpperCAmelCase , "batch_bytes: long" )
.agg(pyspark.sql.functions.sum("batch_bytes" ).alias("sample_bytes" ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
_A = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
_A = min(__UpperCAmelCase , int(approx_total_size / max_shard_size ) )
_A = self.df.repartition(__UpperCAmelCase )
def lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : List[str] , ):
'''simple docstring'''
import pyspark
_A = ParquetWriter if file_format == "parquet" else ArrowWriter
_A = os.path.join(self._working_dir , os.path.basename(__UpperCAmelCase ) ) if self._working_dir else fpath
_A = file_format == "parquet"
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
_A = self.config.features
_A = self._writer_batch_size
_A = self._fs.storage_options
def write_arrow(__UpperCAmelCase : Optional[Any] ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
_A = pyspark.TaskContext().taskAttemptId()
_A = next(__UpperCAmelCase , __UpperCAmelCase )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=["task_id", "num_examples", "num_bytes"] , )
_A = 0
_A = writer_class(
features=__UpperCAmelCase , path=working_fpath.replace("SSSSS" , f'''{shard_id:05d}''' ).replace("TTTTT" , f'''{task_id:05d}''' ) , writer_batch_size=__UpperCAmelCase , storage_options=__UpperCAmelCase , embed_local_files=__UpperCAmelCase , )
_A = pa.Table.from_batches([first_batch] )
writer.write_table(__UpperCAmelCase )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
_A , _A = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , )
shard_id += 1
_A = writer_class(
features=writer._features , path=working_fpath.replace("SSSSS" , f'''{shard_id:05d}''' ).replace("TTTTT" , f'''{task_id:05d}''' ) , writer_batch_size=__UpperCAmelCase , storage_options=__UpperCAmelCase , embed_local_files=__UpperCAmelCase , )
_A = pa.Table.from_batches([batch] )
writer.write_table(__UpperCAmelCase )
if writer._num_bytes > 0:
_A , _A = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(__UpperCAmelCase ) ):
_A = os.path.join(os.path.dirname(__UpperCAmelCase ) , os.path.basename(__UpperCAmelCase ) )
shutil.move(__UpperCAmelCase , __UpperCAmelCase )
_A = (
self.df.mapInArrow(__UpperCAmelCase , "task_id: long, num_examples: long, num_bytes: long" )
.groupBy("task_id" )
.agg(
pyspark.sql.functions.sum("num_examples" ).alias("total_num_examples" ) , pyspark.sql.functions.sum("num_bytes" ).alias("total_num_bytes" ) , pyspark.sql.functions.count("num_bytes" ).alias("num_shards" ) , pyspark.sql.functions.collect_list("num_examples" ).alias("shard_lengths" ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Any = "arrow" , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : Union[str, Any] = None , **__UpperCAmelCase : Any , ):
'''simple docstring'''
self._validate_cache_dir()
_A = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(__UpperCAmelCase )
_A = not is_remote_filesystem(self._fs )
_A = os.path.join if is_local else posixpath.join
_A = "-TTTTT-SSSSS-of-NNNNN"
_A = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}'''
_A = path_join(self._output_dir , __UpperCAmelCase )
_A = 0
_A = 0
_A = 0
_A = []
_A = []
for task_id, content in self._prepare_split_single(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
(
(
_A
) , (
_A
) , (
_A
) , (
_A
) ,
) = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(__UpperCAmelCase )
_A = total_num_examples
_A = total_num_bytes
# should rename everything at the end
logger.debug(f'''Renaming {total_shards} shards.''' )
if total_shards > 1:
_A = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
_A = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
__UpperCAmelCase : str , __UpperCAmelCase : List[str] , __UpperCAmelCase : List[str] , ):
rename(
__UpperCAmelCase , fpath.replace("SSSSS" , f'''{shard_id:05d}''' ).replace("TTTTT" , f'''{task_id:05d}''' ) , fpath.replace("TTTTT-SSSSS" , f'''{global_shard_id:05d}''' ).replace("NNNNN" , f'''{total_shards:05d}''' ) , )
_A = []
_A = 0
for i in range(len(__UpperCAmelCase ) ):
_A , _A = task_id_and_num_shards[i]
for shard_id in range(__UpperCAmelCase ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(__UpperCAmelCase , len(__UpperCAmelCase ) ).map(lambda __UpperCAmelCase : _rename_shard(*__UpperCAmelCase ) ).collect()
else:
# don't use any pattern
_A = 0
_A = task_id_and_num_shards[0][0]
self._rename(
fpath.replace("SSSSS" , f'''{shard_id:05d}''' ).replace("TTTTT" , f'''{task_id:05d}''' ) , fpath.replace(__UpperCAmelCase , "" ) , )
def lowerCAmelCase ( self : str , __UpperCAmelCase : int , ):
'''simple docstring'''
return SparkExamplesIterable(self.df )
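# Hedged sketch (illustrative only) of the repartitioning arithmetic in `_repartition_df_if_needed`
# above: the average Arrow row size is estimated from a sample of at most 100 rows, and the
# DataFrame is repartitioned so that each shard stays under `max_shard_size` (never more
# partitions than rows). The sample numbers below are made up.
def _new_partition_count_sketch(
    df_num_rows: int, sample_bytes: int, sample_num_rows: int, max_shard_size: int
) -> Optional[int]:
    approx_bytes_per_row = sample_bytes / sample_num_rows
    approx_total_size = approx_bytes_per_row * df_num_rows
    if approx_total_size <= max_shard_size:
        return None  # the method above keeps the existing partitioning in this case
    return min(df_num_rows, int(approx_total_size / max_shard_size))
# e.g. 1_000_000 rows at ~500 bytes each with a 100 MB shard limit:
# _new_partition_count_sketch(1_000_000, 50_000, 100, 100_000_000) == 5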
| 330 |
"""simple docstring"""
import math
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = [True] * n
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = True
for i in range(3 ,int(n**0.5 + 1 ) ,2 ):
lowerCamelCase_ = i * 2
while index < n:
lowerCamelCase_ = False
lowerCamelCase_ = index + i
lowerCamelCase_ = [2]
for i in range(3 ,lowerCAmelCase__ ,2 ):
if is_prime[i]:
primes.append(lowerCAmelCase__ )
return primes
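# Note added for clarity (a hedged reading of the function below): for each pair of consecutive
# primes p < q with p * p <= limit, it sums the numbers up to `limit` strictly between p * p and
# q * q that are divisible by p or by q, then subtracts twice the values divisible by both, so
# only numbers divisible by exactly one of the two primes remain counted.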
def lowercase ( lowerCAmelCase__ = 999_966_663_333 ):
lowerCamelCase_ = math.floor(math.sqrt(lowerCAmelCase__ ) ) + 100
lowerCamelCase_ = prime_sieve(lowerCAmelCase__ )
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = primes[prime_index]
while (last_prime**2) <= limit:
lowerCamelCase_ = primes[prime_index + 1]
lowerCamelCase_ = last_prime**2
lowerCamelCase_ = next_prime**2
# Get numbers divisible by lps(current)
lowerCamelCase_ = lower_bound + last_prime
while upper_bound > current <= limit:
matches_sum += current
current += last_prime
# Reset the upper_bound
while (upper_bound - next_prime) > limit:
upper_bound -= next_prime
# Add the numbers divisible by ups(current)
lowerCamelCase_ = upper_bound - next_prime
while current > lower_bound:
matches_sum += current
current -= next_prime
# Remove the numbers divisible by both ups and lps
lowerCamelCase_ = 0
while upper_bound > current <= limit:
if current <= lower_bound:
# Increment the current number
current += last_prime * next_prime
continue
if current > limit:
break
# Remove twice since it was added by both ups and lps
matches_sum -= current * 2
# Increment the current number
current += last_prime * next_prime
# Setup for next pair
lowerCamelCase_ = next_prime
prime_index += 1
return matches_sum
if __name__ == "__main__":
print(solution())
| 29 | 0 |
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
_lowercase : Optional[Any] = logging.getLogger(__name__)
def _lowerCAmelCase ( UpperCamelCase__: Optional[Any] , UpperCamelCase__: List[str] ) -> List[Any]:
"""simple docstring"""
if os.path.exists(lowerCAmelCase__ ):
if os.path.exists(os.path.join(lowerCAmelCase__ , """config.json""" ) ) and os.path.isfile(
os.path.join(lowerCAmelCase__ , """config.json""" ) ):
os.remove(os.path.join(lowerCAmelCase__ , """config.json""" ) )
if os.path.exists(os.path.join(lowerCAmelCase__ , """pytorch_model.bin""" ) ) and os.path.isfile(
os.path.join(lowerCAmelCase__ , """pytorch_model.bin""" ) ):
os.remove(os.path.join(lowerCAmelCase__ , """pytorch_model.bin""" ) )
else:
os.makedirs(lowerCAmelCase__ )
model.save_pretrained(lowerCAmelCase__ )
def _lowerCAmelCase ( UpperCamelCase__: Any , UpperCamelCase__: Dict=False ) -> Optional[int]:
"""simple docstring"""
A = 2
if unlogit:
A = torch.pow(lowerCAmelCase__ , lowerCAmelCase__ )
A = p * torch.log(lowerCAmelCase__ )
A = 0
return -plogp.sum(dim=-1 )
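# Hedged, cleaned-up sketch of the helper above (called `entropy` in the upstream script): it
# computes -sum(p * log p) over the last dimension, zeroing the 0 * log(0) terms, so a uniform
# attention distribution over k tokens scores log(k) nats.
def _entropy_sketch(p: torch.Tensor) -> torch.Tensor:
    plogp = p * torch.log(p)
    plogp[p == 0] = 0  # avoid nan from 0 * log(0)
    return -plogp.sum(dim=-1)
# _entropy_sketch(torch.full((4,), 0.25)) -> tensor(1.3863), i.e. log(4)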
def _lowerCAmelCase ( UpperCamelCase__: Tuple ) -> Tuple:
"""simple docstring"""
logger.info("""lv, h >\t""" + """\t""".join(f'{x + 1}' for x in range(len(lowerCAmelCase__ ) ) ) )
for row in range(len(lowerCAmelCase__ ) ):
if tensor.dtype != torch.long:
logger.info(f'layer {row + 1}:\t' + """\t""".join(f'{x:.5f}' for x in tensor[row].cpu().data ) )
else:
logger.info(f'layer {row + 1}:\t' + """\t""".join(f'{x:d}' for x in tensor[row].cpu().data ) )
def _lowerCAmelCase ( UpperCamelCase__: Dict , UpperCamelCase__: int , UpperCamelCase__: Dict , UpperCamelCase__: Any=True , UpperCamelCase__: Optional[int]=True , UpperCamelCase__: Optional[int]=None , UpperCamelCase__: Optional[Any]=False ) -> Tuple:
"""simple docstring"""
A , A = model.config.num_hidden_layers, model.config.num_attention_heads
A = torch.zeros(lowerCAmelCase__ , lowerCAmelCase__ ).to(args.device )
A = torch.zeros(lowerCAmelCase__ , lowerCAmelCase__ ).to(args.device )
if head_mask is None:
A = torch.ones(lowerCAmelCase__ , lowerCAmelCase__ ).to(args.device )
head_mask.requires_grad_(requires_grad=lowerCAmelCase__ )
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
A = None
A = 0.0
A = 0.0
for step, inputs in enumerate(tqdm(lowerCAmelCase__ , desc="""Iteration""" , disable=args.local_rank not in [-1, 0] ) ):
A = tuple(t.to(args.device ) for t in inputs )
((A ) , ) = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
A = model(lowerCAmelCase__ , labels=lowerCAmelCase__ , head_mask=lowerCAmelCase__ )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
A , A , A = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(lowerCAmelCase__ ):
A = entropy(attn.detach() , lowerCAmelCase__ )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(lowerCAmelCase__ ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
A = 2
A = torch.pow(torch.pow(lowerCAmelCase__ , lowerCAmelCase__ ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20
if not args.dont_normalize_global_importance:
A = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info("""Attention entropies""" )
print_ad_tensor(lowerCAmelCase__ )
if compute_importance:
logger.info("""Head importance scores""" )
print_ad_tensor(lowerCAmelCase__ )
logger.info("""Head ranked by importance scores""" )
A = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
A = torch.arange(
head_importance.numel() , device=args.device )
A = head_ranks.view_as(lowerCAmelCase__ )
print_ad_tensor(lowerCAmelCase__ )
return attn_entropy, head_importance, total_loss
def _lowerCAmelCase ( UpperCamelCase__: Optional[int] , UpperCamelCase__: Dict , UpperCamelCase__: Any ) -> List[str]:
"""simple docstring"""
A , A , A = compute_heads_importance(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , compute_entropy=lowerCAmelCase__ )
A = 1 / loss # instead of downsteam score use the LM loss
logger.info("""Pruning: original score: %f, threshold: %f""" , lowerCAmelCase__ , original_score * args.masking_threshold )
A = torch.ones_like(lowerCAmelCase__ )
A = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
A = original_score
while current_score >= original_score * args.masking_threshold:
A = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
A = float("""Inf""" )
A = head_importance.view(-1 ).sort()[1]
if len(lowerCAmelCase__ ) <= num_to_mask:
print("""BREAK BY num_to_mask""" )
break
# mask heads
A = current_heads_to_mask[:num_to_mask]
logger.info("""Heads to mask: %s""" , str(current_heads_to_mask.tolist() ) )
A = new_head_mask.view(-1 )
A = 0.0
A = new_head_mask.view_as(lowerCAmelCase__ )
A = new_head_mask.clone().detach()
print_ad_tensor(lowerCAmelCase__ )
# Compute metric and head importance again
A , A , A = compute_heads_importance(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , compute_entropy=lowerCAmelCase__ , head_mask=lowerCAmelCase__ )
A = 1 / loss
logger.info(
"""Masking: current score: %f, remaining heads %d (%.1f percents)""" , lowerCAmelCase__ , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 1_00 , )
logger.info("""Final head mask""" )
print_ad_tensor(lowerCAmelCase__ )
np.save(os.path.join(args.output_dir , """head_mask.npy""" ) , head_mask.detach().cpu().numpy() )
return head_mask
def _lowerCAmelCase ( UpperCamelCase__: Any , UpperCamelCase__: List[str] , UpperCamelCase__: Optional[Any] , UpperCamelCase__: List[Any] ) -> str:
"""simple docstring"""
A = datetime.now()
A , A , A = compute_heads_importance(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , compute_entropy=lowerCAmelCase__ , compute_importance=lowerCAmelCase__ , head_mask=lowerCAmelCase__ )
A = 1 / loss
A = datetime.now() - before_time
A = sum(p.numel() for p in model.parameters() )
A = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(lowerCAmelCase__ ) )
}
for k, v in heads_to_prune.items():
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
A = [
v,
]
assert sum(len(lowerCAmelCase__ ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(lowerCAmelCase__ )
A = sum(p.numel() for p in model.parameters() )
A = datetime.now()
A , A , A = compute_heads_importance(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , compute_entropy=lowerCAmelCase__ , compute_importance=lowerCAmelCase__ , head_mask=lowerCAmelCase__ , actually_pruned=lowerCAmelCase__ , )
A = 1 / loss
A = datetime.now() - before_time
logger.info(
"""Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)""" , lowerCAmelCase__ , lowerCAmelCase__ , pruned_num_params / original_num_params * 1_00 , )
logger.info("""Pruning: score with masking: %f score with pruning: %f""" , lowerCAmelCase__ , lowerCAmelCase__ )
logger.info("""Pruning: speed ratio (original timing / new timing): %f percents""" , original_time / new_time * 1_00 )
save_model(lowerCAmelCase__ , args.output_dir )
def _lowerCAmelCase ( ) -> Union[str, Any]:
"""simple docstring"""
A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--data_dir""" , default=lowerCAmelCase__ , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help="""The input data dir. Should contain the .tsv files (or other data files) for the task.""" , )
parser.add_argument(
"""--model_name_or_path""" , default=lowerCAmelCase__ , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help="""Path to pretrained model or model identifier from huggingface.co/models""" , )
parser.add_argument(
"""--output_dir""" , default=lowerCAmelCase__ , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help="""The output directory where the model predictions and checkpoints will be written.""" , )
# Other parameters
parser.add_argument(
"""--config_name""" , default="""""" , type=lowerCAmelCase__ , help="""Pretrained config name or path if not the same as model_name_or_path""" , )
parser.add_argument(
"""--tokenizer_name""" , default="""""" , type=lowerCAmelCase__ , help="""Pretrained tokenizer name or path if not the same as model_name_or_path""" , )
parser.add_argument(
"""--cache_dir""" , default=lowerCAmelCase__ , type=lowerCAmelCase__ , help="""Where do you want to store the pre-trained models downloaded from s3""" , )
parser.add_argument(
"""--data_subset""" , type=lowerCAmelCase__ , default=-1 , help="""If > 0: limit the data to a subset of data_subset instances.""" )
parser.add_argument(
"""--overwrite_output_dir""" , action="""store_true""" , help="""Whether to overwrite data in output directory""" )
parser.add_argument(
"""--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""" )
parser.add_argument(
"""--dont_normalize_importance_by_layer""" , action="""store_true""" , help="""Don\'t normalize importance score by layers""" )
parser.add_argument(
"""--dont_normalize_global_importance""" , action="""store_true""" , help="""Don\'t normalize all importance scores between 0 and 1""" , )
parser.add_argument(
"""--try_masking""" , action="""store_true""" , help="""Whether to try to mask head until a threshold of accuracy.""" )
parser.add_argument(
"""--masking_threshold""" , default=0.9 , type=lowerCAmelCase__ , help="""masking threshold in term of metrics (stop masking when metric < threshold * original metric value).""" , )
parser.add_argument(
"""--masking_amount""" , default=0.1 , type=lowerCAmelCase__ , help="""Amount to heads to masking at each masking step.""" )
parser.add_argument("""--metric_name""" , default="""acc""" , type=lowerCAmelCase__ , help="""Metric to use for head masking.""" )
parser.add_argument(
"""--max_seq_length""" , default=1_28 , type=lowerCAmelCase__ , help=(
"""The maximum total input sequence length after WordPiece tokenization. \n"""
"""Sequences longer than this will be truncated, sequences shorter padded."""
) , )
parser.add_argument("""--batch_size""" , default=1 , type=lowerCAmelCase__ , help="""Batch size.""" )
parser.add_argument("""--seed""" , type=lowerCAmelCase__ , default=42 )
parser.add_argument("""--local_rank""" , type=lowerCAmelCase__ , default=-1 , help="""local_rank for distributed training on gpus""" )
parser.add_argument("""--no_cuda""" , action="""store_true""" , help="""Whether not to use CUDA when available""" )
parser.add_argument("""--server_ip""" , type=lowerCAmelCase__ , default="""""" , help="""Can be used for distant debugging.""" )
parser.add_argument("""--server_port""" , type=lowerCAmelCase__ , default="""""" , help="""Can be used for distant debugging.""" )
A = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("""Waiting for debugger attach""" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=lowerCAmelCase__ )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
A = torch.device("""cuda""" if torch.cuda.is_available() and not args.no_cuda else """cpu""" )
A = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
A = torch.device("""cuda""" , args.local_rank )
A = 1
torch.distributed.init_process_group(backend="""nccl""" ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info("""device: {} n_gpu: {}, distributed: {}""".format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
A = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
A = nn.parallel.DistributedDataParallel(
lowerCAmelCase__ , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=lowerCAmelCase__ )
elif args.n_gpu > 1:
A = nn.DataParallel(lowerCAmelCase__ )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=lowerCAmelCase__ )
torch.save(lowerCAmelCase__ , os.path.join(args.output_dir , """run_args.bin""" ) )
logger.info("""Training/evaluation parameters %s""" , lowerCAmelCase__ )
# Prepare dataset
A = np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
A = (torch.from_numpy(lowerCAmelCase__ ),)
A = TensorDataset(*lowerCAmelCase__ )
A = RandomSampler(lowerCAmelCase__ )
A = DataLoader(lowerCAmelCase__ , sampler=lowerCAmelCase__ , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Try head masking (set heads to zero until the score goes under a threshole)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
A = mask_heads(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
prune_heads(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
if __name__ == "__main__":
main()
| 641 |
"""simple docstring"""
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
A_ = namedtuple(
"""_TestCommandArgs""",
[
"""dataset""",
"""name""",
"""cache_dir""",
"""data_dir""",
"""all_configs""",
"""save_infos""",
"""ignore_verifications""",
"""force_redownload""",
"""clear_cache""",
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_apercent_close( source , target ):
    return (abs(source - target ) / target) < 0.01
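# e.g. is_apercent_close(2_351_563, 2_360_000) is True (the values are about 0.36 percent apart),
# while values that differ by more than 1 percent of the target return False.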
@pytest.mark.integration
def lowercase ( lowerCAmelCase__ ):
    args = _TestCommandArgs(dataset=lowerCAmelCase__ ,all_configs=True ,save_infos=True )
    test_command = TestCommand(*args )
    test_command.run()
    datasets_readme_path = os.path.join(lowerCAmelCase__ ,'''README.md''' )
    assert os.path.exists(datasets_readme_path )
    dataset_infos = DatasetInfosDict.from_directory(lowerCAmelCase__ )
    expected_dataset_infos = DatasetInfosDict(
{
'''default''': DatasetInfo(
features=Features(
{
'''tokens''': Sequence(Value('''string''' ) ),
'''ner_tags''': Sequence(
ClassLabel(names=['''O''', '''B-PER''', '''I-PER''', '''B-ORG''', '''I-ORG''', '''B-LOC''', '''I-LOC'''] ) ),
'''langs''': Sequence(Value('''string''' ) ),
'''spans''': Sequence(Value('''string''' ) ),
} ) ,splits=[
{
'''name''': '''train''',
'''num_bytes''': 2_351_563,
'''num_examples''': 10_000,
},
{
'''name''': '''validation''',
'''num_bytes''': 238_418,
'''num_examples''': 1_000,
},
] ,download_size=3_940_680 ,dataset_size=2_589_981 ,)
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result , expected = getattr(dataset_infos['''default'''] ,key ), getattr(expected_dataset_infos['''default'''] ,key )
        if key == "num_bytes":
            assert is_apercent_close(result ,expected )
        elif key == "splits":
            assert list(result ) == list(expected )
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_apercent_close(result[split].num_bytes ,expected[split].num_bytes )
        else:
            assert result == expected
| 29 | 0 |
"""simple docstring"""
def lowerCamelCase_(input_str: str ) -> str:
    return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 338 |
"""simple docstring"""
from jiwer import compute_measures
import datasets
A_ = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
A_ = """\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
"""
A_ = """
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> wer = datasets.load_metric(\"wer\")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase ( datasets.Metric ):
    def _info(self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
] , )
    def _compute(self , predictions=None , references=None , concatenate_texts=False ):
        if concatenate_texts:
            return compute_measures(references , predictions )["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions , references ):
                measures = compute_measures(reference , prediction )
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
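

# --- Hedged usage sketch (added for illustration; not part of the original metric file) ---
# The docstring above defines WER = (S + D + I) / N.  Assuming `jiwer` is installed, the snippet
# below checks that formula directly via `compute_measures` on the toy pair from the docstring
# example, which should print a WER of 0.5.
if __name__ == "__main__":
    toy_predictions = ["this is the prediction", "there is an other sample"]
    toy_references = ["this is the reference", "there is another one"]
    print(compute_measures(toy_references, toy_predictions)["wer"])  # 0.5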
| 29 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class lowercase_ ( OwlViTImageProcessor ):
    def __init__( self , *args , **kwargs ):
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 308 |
"""simple docstring"""
def lowercase ( input_str: str ) -> str:
    return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 29 | 0 |
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
SCREAMING_SNAKE_CASE : Tuple = get_logger(__name__)
SCREAMING_SNAKE_CASE : List[Any] = Path(__file__).parent / "model_card_template.md"
SCREAMING_SNAKE_CASE : Union[str, Any] = uuida().hex
SCREAMING_SNAKE_CASE : str = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
SCREAMING_SNAKE_CASE : List[Any] = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
SCREAMING_SNAKE_CASE : int = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"
def UpperCamelCase_( lowerCamelCase_ = None ) -> List[Any]:
_lowercase : List[Any] = F'''diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}'''
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += F'''; torch/{_torch_version}'''
if is_flax_available():
ua += F'''; jax/{_jax_version}'''
ua += F'''; flax/{_flax_version}'''
if is_onnx_available():
ua += F'''; onnxruntime/{_onnxruntime_version}'''
# CI will set this value to True
if os.environ.get('DIFFUSERS_IS_CI' , '' ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
ua += "; " + "; ".join(F'''{k}/{v}''' for k, v in user_agent.items() )
elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
ua += "; " + user_agent
return ua
def get_full_repo_name( model_id , organization = None , token = None ) -> str:
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token )['name']
        return F'''{username}/{model_id}'''
    else:
        return F'''{organization}/{model_id}'''
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> Any:
if not is_jinja_available():
raise ValueError(
'Modelcard rendering is based on Jinja templates.'
' Please make sure to have `jinja` installed before using `create_model_card`.'
' To install it, please run `pip install Jinja2`.' )
if hasattr(lowerCAmelCase__ , 'local_rank' ) and args.local_rank not in [-1, 0]:
return
_lowercase : Optional[int] = args.hub_token if hasattr(lowerCAmelCase__ , 'hub_token' ) else None
_lowercase : int = get_full_repo_name(lowerCAmelCase__ , token=lowerCAmelCase__ )
_lowercase : List[str] = ModelCard.from_template(
card_data=ModelCardData( # Card metadata object that will be converted to YAML block
language='en' , license='apache-2.0' , library_name='diffusers' , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=lowerCAmelCase__ , model_name=lowerCAmelCase__ , repo_name=lowerCAmelCase__ , dataset_name=args.dataset_name if hasattr(lowerCAmelCase__ , 'dataset_name' ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=(
args.gradient_accumulation_steps if hasattr(lowerCAmelCase__ , 'gradient_accumulation_steps' ) else None
) , adam_betaa=args.adam_betaa if hasattr(lowerCAmelCase__ , 'adam_beta1' ) else None , adam_betaa=args.adam_betaa if hasattr(lowerCAmelCase__ , 'adam_beta2' ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(lowerCAmelCase__ , 'adam_weight_decay' ) else None , adam_epsilon=args.adam_epsilon if hasattr(lowerCAmelCase__ , 'adam_epsilon' ) else None , lr_scheduler=args.lr_scheduler if hasattr(lowerCAmelCase__ , 'lr_scheduler' ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(lowerCAmelCase__ , 'lr_warmup_steps' ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(lowerCAmelCase__ , 'ema_inv_gamma' ) else None , ema_power=args.ema_power if hasattr(lowerCAmelCase__ , 'ema_power' ) else None , ema_max_decay=args.ema_max_decay if hasattr(lowerCAmelCase__ , 'ema_max_decay' ) else None , mixed_precision=args.mixed_precision , )
_lowercase : List[Any] = os.path.join(args.output_dir , 'README.md' )
model_card.save(lowerCAmelCase__ )
def UpperCamelCase_( resolved_file , commit_hash = None ) -> Any:
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file ).as_posix() )
    search = re.search(R'snapshots/([^/]+)/' , resolved_file )
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash ) else None
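
# The helper above recovers a commit hash from a resolved cache path such as
# ".../snapshots/<40-character-sha>/unet/config.json", returning None when no
# snapshots/<sha>/ segment that looks like a commit hash is present.
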
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
SCREAMING_SNAKE_CASE : List[str] = os.path.expanduser(
os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(hf_cache_home, "diffusers")
def UpperCamelCase_( lowerCamelCase_ = None , lowerCamelCase_ = None ) -> Any:
if new_cache_dir is None:
_lowercase : List[Any] = DIFFUSERS_CACHE
if old_cache_dir is None:
_lowercase : Any = old_diffusers_cache
_lowercase : Tuple = Path(lowerCAmelCase__ ).expanduser()
_lowercase : Union[str, Any] = Path(lowerCAmelCase__ ).expanduser()
for old_blob_path in old_cache_dir.glob('**/blobs/*' ):
if old_blob_path.is_file() and not old_blob_path.is_symlink():
_lowercase : int = new_cache_dir / old_blob_path.relative_to(lowerCAmelCase__ )
new_blob_path.parent.mkdir(parents=lowerCAmelCase__ , exist_ok=lowerCAmelCase__ )
os.replace(lowerCAmelCase__ , lowerCAmelCase__ )
try:
os.symlink(lowerCAmelCase__ , lowerCAmelCase__ )
except OSError:
logger.warning(
'Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.' )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
SCREAMING_SNAKE_CASE : int = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
SCREAMING_SNAKE_CASE : List[str] = 0
else:
with open(cache_version_file) as f:
try:
SCREAMING_SNAKE_CASE : List[str] = int(f.read())
except ValueError:
SCREAMING_SNAKE_CASE : Any = 0
if cache_version < 1:
SCREAMING_SNAKE_CASE : Dict = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
"The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your "
"existing cached models. This is a one-time operation, you can interrupt it or run it "
"later by calling `diffusers.utils.hub_utils.move_cache()`."
)
try:
move_cache()
except Exception as e:
SCREAMING_SNAKE_CASE : Optional[int] = "\n".join(traceback.format_tb(e.__traceback__))
logger.error(
F"There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease "
"file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole "
"message and we will do our best to help."
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, "w") as f:
f.write("1")
except Exception:
logger.warning(
F"There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure "
"the directory exists and can be written to."
)
def _add_variant( weights_name , variant = None ) -> str:
    if variant is not None:
        splits = weights_name.split('.' )
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = '.'.join(splits )
    return weights_name
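
# e.g. _add_variant("pytorch_model.bin", "fp16") -> "pytorch_model.fp16.bin" (the variant is
# spliced in before the file extension); with variant=None the name is returned unchanged.
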
def UpperCamelCase_( lowerCamelCase_ , *,
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=None , ) -> List[str]:
_lowercase : Tuple = str(lowerCAmelCase__ )
if os.path.isfile(lowerCAmelCase__ ):
return pretrained_model_name_or_path
elif os.path.isdir(lowerCAmelCase__ ):
if os.path.isfile(os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) ):
# Load from a PyTorch checkpoint
_lowercase : List[Any] = os.path.join(lowerCAmelCase__ , lowerCAmelCase__ )
return model_file
elif subfolder is not None and os.path.isfile(
os.path.join(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) ):
_lowercase : Union[str, Any] = os.path.join(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
return model_file
else:
raise EnvironmentError(
F'''Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.''' )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
and version.parse(version.parse(lowerCAmelCase__ ).base_version ) >= version.parse('0.20.0' )
):
try:
_lowercase : Union[str, Any] = hf_hub_download(
lowerCAmelCase__ , filename=_add_variant(lowerCAmelCase__ , lowerCAmelCase__ ) , cache_dir=lowerCAmelCase__ , force_download=lowerCAmelCase__ , proxies=lowerCAmelCase__ , resume_download=lowerCAmelCase__ , local_files_only=lowerCAmelCase__ , use_auth_token=lowerCAmelCase__ , user_agent=lowerCAmelCase__ , subfolder=lowerCAmelCase__ , revision=revision or commit_hash , )
warnings.warn(
F'''Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.''' , lowerCAmelCase__ , )
return model_file
except: # noqa: E722
warnings.warn(
F'''You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(lowerCAmelCase__ , lowerCAmelCase__ )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(lowerCAmelCase__ , lowerCAmelCase__ )}\' so that the correct variant file can be added.''' , lowerCAmelCase__ , )
try:
# 2. Load model file as usual
_lowercase : Optional[int] = hf_hub_download(
lowerCAmelCase__ , filename=lowerCAmelCase__ , cache_dir=lowerCAmelCase__ , force_download=lowerCAmelCase__ , proxies=lowerCAmelCase__ , resume_download=lowerCAmelCase__ , local_files_only=lowerCAmelCase__ , use_auth_token=lowerCAmelCase__ , user_agent=lowerCAmelCase__ , subfolder=lowerCAmelCase__ , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
F'''{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier '''
'listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '
'token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '
'login`.' )
except RevisionNotFoundError:
raise EnvironmentError(
F'''{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for '''
'this model name. Check the model page at '
F'''\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.''' )
except EntryNotFoundError:
raise EnvironmentError(
F'''{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.''' )
except HTTPError as err:
raise EnvironmentError(
F'''There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}''' )
except ValueError:
raise EnvironmentError(
F'''We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it'''
F''' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a'''
F''' directory containing a file named {weights_name} or'''
' \nCheckout your internet connection or see how to run the library in'
' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.' )
except EnvironmentError:
raise EnvironmentError(
F'''Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from '''
'\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '
F'''Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory '''
F'''containing a file named {weights_name}''' )
| 89 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class __lowerCamelCase ( unittest.TestCase ):
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = jnp.ones((batch_size, length) ) / length
return scores
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = None
lowerCamelCase_ = 20
lowerCamelCase_ = self._get_uniform_logits(batch_size=2 , length=UpperCAmelCase )
# tweak scores to not be uniform anymore
lowerCamelCase_ = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
lowerCamelCase_ = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
lowerCamelCase_ = jax.nn.softmax(UpperCAmelCase , axis=-1 )
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=1.3 )
lowerCamelCase_ = jax.nn.softmax(temp_dist_warper_sharper(UpperCAmelCase , scores.copy() , cur_len=UpperCAmelCase ) , axis=-1 )
lowerCamelCase_ = jax.nn.softmax(temp_dist_warper_smoother(UpperCAmelCase , scores.copy() , cur_len=UpperCAmelCase ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = None
lowerCamelCase_ = 10
lowerCamelCase_ = 2
# create ramp distribution
lowerCamelCase_ = np.broadcast_to(np.arange(UpperCAmelCase )[None, :] , (batch_size, vocab_size) ).copy()
lowerCamelCase_ = ramp_logits[1:, : vocab_size // 2] + vocab_size
lowerCamelCase_ = FlaxTopKLogitsWarper(3 )
lowerCamelCase_ = top_k_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
lowerCamelCase_ = 5
lowerCamelCase_ = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
lowerCamelCase_ = np.broadcast_to(np.arange(UpperCAmelCase )[None, :] , (batch_size, length) ).copy()
lowerCamelCase_ = top_k_warp_safety_check(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = None
lowerCamelCase_ = 10
lowerCamelCase_ = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
lowerCamelCase_ = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.1_5, 0.3, 0.3, 0.2_5]] ) )
lowerCamelCase_ = FlaxTopPLogitsWarper(0.8 )
lowerCamelCase_ = np.exp(top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
lowerCamelCase_ = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.2_5]] )
self.assertTrue(np.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
# check edge cases with negative and extreme logits
lowerCamelCase_ = np.broadcast_to(np.arange(UpperCAmelCase )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
lowerCamelCase_ = ramp_logits[1] * 1_0_0.0
# make sure at least 2 tokens are kept
lowerCamelCase_ = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
lowerCamelCase_ = top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 20
lowerCamelCase_ = 4
lowerCamelCase_ = 0
lowerCamelCase_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCAmelCase )
# check that min length is applied at length 5
lowerCamelCase_ = ids_tensor((batch_size, 20) , vocab_size=20 )
lowerCamelCase_ = 5
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = min_dist_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] )
# check that min length is not applied anymore at length 15
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = 15
lowerCamelCase_ = min_dist_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertFalse(jnp.isinf(UpperCAmelCase ).any() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 20
lowerCamelCase_ = 4
lowerCamelCase_ = 0
lowerCamelCase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase )
# check that all scores are -inf except the bos_token_id score
lowerCamelCase_ = ids_tensor((batch_size, 1) , vocab_size=20 )
lowerCamelCase_ = 1
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
lowerCamelCase_ = 3
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertFalse(jnp.isinf(UpperCAmelCase ).any() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 20
lowerCamelCase_ = 4
lowerCamelCase_ = 0
lowerCamelCase_ = 5
lowerCamelCase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase , eos_token_id=UpperCAmelCase )
# check that all scores are -inf except the eos_token_id when max_length is reached
lowerCamelCase_ = ids_tensor((batch_size, 4) , vocab_size=20 )
lowerCamelCase_ = 4
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
lowerCamelCase_ = 3
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertFalse(jnp.isinf(UpperCAmelCase ).any() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 4
lowerCamelCase_ = 10
lowerCamelCase_ = 15
lowerCamelCase_ = 2
lowerCamelCase_ = 1
lowerCamelCase_ = 15
# dummy input_ids and scores
lowerCamelCase_ = ids_tensor((batch_size, sequence_length) , UpperCAmelCase )
lowerCamelCase_ = input_ids.copy()
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = scores.copy()
# instantiate all dist processors
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase_ = FlaxTopKLogitsWarper(3 )
lowerCamelCase_ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCamelCase_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCAmelCase )
lowerCamelCase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase )
lowerCamelCase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase , eos_token_id=UpperCAmelCase )
lowerCamelCase_ = 10
# no processor list
lowerCamelCase_ = temp_dist_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = top_k_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = min_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = bos_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = eos_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# with processor list
lowerCamelCase_ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCamelCase_ = processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 4
lowerCamelCase_ = 10
lowerCamelCase_ = 15
lowerCamelCase_ = 2
lowerCamelCase_ = 1
lowerCamelCase_ = 15
# dummy input_ids and scores
lowerCamelCase_ = ids_tensor((batch_size, sequence_length) , UpperCAmelCase )
lowerCamelCase_ = input_ids.copy()
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = scores.copy()
# instantiate all dist processors
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase_ = FlaxTopKLogitsWarper(3 )
lowerCamelCase_ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCamelCase_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCAmelCase )
lowerCamelCase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase )
lowerCamelCase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase , eos_token_id=UpperCAmelCase )
lowerCamelCase_ = 10
# no processor list
def run_no_processor_list(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = temp_dist_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = top_k_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = min_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = bos_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = eos_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
return scores
# with processor list
def run_processor_list(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCamelCase_ = processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
return scores
lowerCamelCase_ = jax.jit(UpperCAmelCase )
lowerCamelCase_ = jax.jit(UpperCAmelCase )
lowerCamelCase_ = jitted_run_no_processor_list(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = jitted_run_processor_list(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
| 29 | 0 |
"""simple docstring"""
import requests
def send_slack_message( message_body , slack_url ):
    """simple docstring"""
    headers = {"""Content-Type""": """application/json"""}
    response = requests.post(slack_url , json={"""text""": message_body} , headers=headers )
    if response.status_code != 200:
        error_message = (
            """Request to slack returned an error """
            f"""{response.status_code}, the response is:\n{response.text}"""
        )
        raise ValueError(error_message )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
| 644 |
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration( func ):
    def wrapper(*args ,**kwargs ):
        starttime = timeit.default_timer()
        _ = func(*args ,**kwargs )
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper


def generate_examples( features ,num_examples=100 ,seq_shapes=None ):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples ):
        example = {}
        for col_id, (k, v) in enumerate(features.items() ):
            if isinstance(v ,_ArrayXD ):
                data = np.random.rand(*v.shape ).astype(v.dtype )
            elif isinstance(v ,datasets.Value ):
                if v.dtype == "string":
                    data = '''The small grey turtle was surprisingly fast when challenged.'''
                else:
                    data = np.random.randint(10 ,size=1 ).astype(v.dtype ).item()
            elif isinstance(v ,datasets.Sequence ):
                while isinstance(v ,datasets.Sequence ):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape ).astype(v.dtype )
            example[k] = data

        dummy_data.append((i, example) )

    return dummy_data


def generate_example_dataset( dataset_path ,features ,num_examples=100 ,seq_shapes=None ):
    dummy_data = generate_examples(features ,num_examples=num_examples ,seq_shapes=seq_shapes )

    with ArrowWriter(features=features ,path=dataset_path ) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record )
            writer.write(example )

        num_final_examples , num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}." )

    dataset = datasets.Dataset.from_file(filename=dataset_path ,info=datasets.DatasetInfo(features=features ) )
    return dataset
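

# --- Hedged usage sketch (added for illustration; the file name and feature shapes are made up) ---
# Shows how the helpers above can materialize a small Arrow-backed dataset of random examples.
if __name__ == "__main__":
    demo_features = datasets.Features(
        {"text": datasets.Value("string"), "vector": datasets.Sequence(datasets.Value("float32"))}
    )
    demo_dataset = generate_example_dataset(
        "dummy_benchmark_data.arrow", demo_features, num_examples=10, seq_shapes={"vector": (16,)}
    )
    print(demo_dataset)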
| 29 | 0 |