"""Solve Coulomb's law for whichever of force, charge1, charge2 or distance is zero."""
from __future__ import annotations

COULOMBS_CONSTANT = 8.988e9  # units = N * m^2 * C^-2


def coulombs_law(force: float, charge1: float, charge2: float, distance: float) -> dict[str, float]:
    """Apply Coulomb's law, F = k * q1 * q2 / d^2, solving for the single zero-valued argument."""
    charge_product = abs(charge1 * charge2)
    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if distance < 0:
        raise ValueError("Distance cannot be negative")
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
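# Quick usage sketch (values are illustrative): with force set to 0, the
# function solves F = k * q1 * q2 / d^2 for it.
# >>> coulombs_law(force=0, charge1=3, charge2=5, distance=2000)
# {'force': 33705.0}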
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform

from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_config(model_name):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = "std_conv" if "bit" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer,
        num_labels=1000,
        id2label=id2label,
        label2id=label2id,
    )

    return config


def rename_key(name):
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name

    return name


def prepare_img():
    # test image the conversion is verified on
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="resnetv2_50x1_bitm",
        type=str,
        help="Name of the BiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model to the hub.",
    )

    args = parser.parse_args()
    convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
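# Example invocation (the script filename is illustrative):
#   python convert_bit_to_pytorch.py --model_name resnetv2_50x1_bitm --pytorch_dump_folder_path ./bit-dump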
"""Tests for transformers.convert_graph_to_onnx."""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory

from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
    convert,
    ensure_valid_input,
    generate_identified_filename,
    infer_shapes,
    quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow


class FuncContiguousArgs:
    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None


class OnnxExportTestCase(unittest.TestCase):
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]

    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab)))
            model.save_pretrained(bert_save_dir)
            self._test_export(bert_save_dir, "pt", 12, tokenizer)

    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")

            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()

            # Export
            convert(framework, model, path, opset, tokenizer, **model_kwargs)

            return path
        except Exception as e:
            self.fail(e)

    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")

    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)

        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})

    def test_ensure_valid_input(self):
        input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, input_names)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)

        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(input_names))

        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))

        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, input_names)

        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")

    def test_generate_identified_name(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
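# A minimal sketch of the filename behavior asserted above (inferred from the
# test expectation, not the actual transformers implementation):
def _generate_identified_filename_sketch(filename: Path, identifier: str) -> Path:
    # "my_fake_model.onnx" + "-test" -> "my_fake_model-test.onnx"
    return filename.parent.joinpath(filename.stem + identifier).with_suffix(filename.suffix)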
import os

import pytest
from attr import dataclass


DEFAULT_REGION = "us-east-1"  # default region


@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}

    @property
    def metric_definitions(self):
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
            ]

    @property
    def base_job_name(self):
        return f"{self.framework}-transformers-test"

    @property
    def test_path(self):
        return f"./tests/sagemaker/scripts/{self.framework}"

    @property
    def image_uri(self):
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"


@pytest.fixture(scope="class")
def sm_env(request):
    env = SageMakerTestEnvironment(framework=request.cls.framework)
    request.cls.env = env
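# Illustrative sanity check (not part of the original conftest) that the
# PyTorch metric regexes pull the value out of a training log line:
#   >>> import re
#   >>> re.search(r"train_runtime.*=\D*(.*?)$", "train_runtime = 123.4").group(1)
#   '123.4'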
import unittest

from diffusers.models.unet_2d_blocks import *  # noqa F403
from diffusers.utils import torch_device

from .test_unet_blocks_common import UNetBlockTesterMixin


class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
        super().test_output(expected_slice)


class ResnetDownsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetDownsampleBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
        super().test_output(expected_slice)


class AttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
        super().test_output(expected_slice)


class CrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnDownBlock2D  # noqa F405
    block_type = "down"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
        super().test_output(expected_slice)


class SimpleCrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
        super().test_output(expected_slice)


class SkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
        super().test_output(expected_slice)


class AttnSkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
        super().test_output(expected_slice)


class DownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownEncoderBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
        super().test_output(expected_slice)


class AttnDownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownEncoderBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
        super().test_output(expected_slice)


class UNetMidBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2D  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "temb_channels": 128,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
        super().test_output(expected_slice)


class UNetMidBlock2DCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2DCrossAttn  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
        super().test_output(expected_slice)


class UNetMidBlock2DSimpleCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2DSimpleCrossAttn  # noqa F405
    block_type = "mid"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
        super().test_output(expected_slice)


class UpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
        super().test_output(expected_slice)


class ResnetUpsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetUpsampleBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
        super().test_output(expected_slice)


class CrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
        super().test_output(expected_slice)


class SimpleCrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True, include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
        super().test_output(expected_slice)


class AttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
        super().test_output(expected_slice)


class SkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
        super().test_output(expected_slice)


class AttnSkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
        super().test_output(expected_slice)


class UpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpDecoderBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
        super().test_output(expected_slice)


class AttnUpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpDecoderBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
        super().test_output(expected_slice)
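# Note on the assertions above: each test_output call hands UNetBlockTesterMixin
# a hard-coded slice of nine expected output values. A rough sketch of the check
# the mixin performs (an assumption about its internals, not its exact source):
#   output_slice = output[0, -1, -3:, -3:].flatten()
#   assert torch.allclose(output_slice, torch.tensor(expected_slice), atol=5e-3)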
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('''To use the rich extension, install rich with `pip install rich`''')
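# For context, `is_rich_available` is typically a thin importlib probe; a minimal
# sketch (an assumption, not necessarily the actual implementation in `.imports`):
#   import importlib.util
#   def is_rich_available():
#       return importlib.util.find_spec("rich") is not None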
"""Compute ROUGE between a predictions file and a reference file, one example per line."""
import fire

from utils import calculate_rouge, save_json


def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge."""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely


if __name__ == "__main__":
    fire.Fire(calculate_rouge_path)
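# Example invocation via python-fire (file names are illustrative):
#   python rouge_cli.py predictions.txt references.txt --save_path metrics.json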
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union

import tqdm
from filelock import FileLock
from transformers import (
    BartTokenizer,
    BartTokenizerFast,
    DataProcessor,
    PreTrainedTokenizer,
    RobertaTokenizer,
    RobertaTokenizerFast,
    XLMRobertaTokenizer,
    is_tf_available,
    is_torch_available,
)


logger = logging.getLogger(__name__)


@dataclass(frozen=True)
class InputExample:
    """A single training/test example for the HANS dataset."""

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    """A single set of features of data."""

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None


if is_torch_available():
    import torch
    from torch.utils.data import Dataset

    class HansDataset(Dataset):
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir,
            tokenizer,
            task,
            max_seq_length=None,
            overwrite_cache=False,
            evaluate=False,
        ):
            processor = hans_processors[task]()

            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train",
                    tokenizer.__class__.__name__,
                    str(max_seq_length),
                    task,
                ),
            )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )

                    logger.info("Training examples: %s", len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info("Saving features into cached file %s", cached_features_file)
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i):
            return self.features[i]

        def get_labels(self):
            return self.label_list


if is_tf_available():
    import tensorflow as tf

    class TFHansDataset:
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir,
            tokenizer,
            task,
            max_seq_length=128,
            overwrite_cache=False,
            evaluate=False,
        ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                    if ex_index % 10000 == 0:
                        logger.info("Writing example %d of %d" % (ex_index, len(examples)))

                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )

            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {
                        "example_id": tf.int32,
                        "input_ids": tf.int32,
                        "attention_mask": tf.int32,
                        "token_type_ids": tf.int32,
                    },
                    tf.int64,
                ),
                (
                    {
                        "example_id": tf.TensorShape([]),
                        "input_ids": tf.TensorShape([None, None]),
                        "attention_mask": tf.TensorShape([None, None]),
                        "token_type_ids": tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ),
            )

        def get_dataset(self):
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i):
            return self.features[i]

        def get_labels(self):
            return self.label_list


class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        """See base class."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples


def hans_convert_examples_to_features(
    examples: List[InputExample],
    label_list: List[str],
    max_length: int,
    tokenizer: PreTrainedTokenizer,
):
    """Loads a list of ``InputExample`` into a list of ``InputFeatures``."""
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))

        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )

        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)

        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features


hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
import argparse
import gc
import json
import os

import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler


MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def b2mb(x):
    """Convert a byte count to whole mebibytes."""
    return int(x / 2**20)


class TorchTracemalloc:
    """Context manager tracking CUDA memory allocated inside the `with` block."""

    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = b2mb(self.end - self.begin)
        self.peaked = b2mb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")


def get_dataloaders(
    accelerator: Accelerator,
    batch_size: int = 16,
    model_name: str = "bert-base-cased",
    n_train: int = 320,
    n_val: int = 160,
):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"}
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader


def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

                overall_step += 1

        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(b2mb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + b2mb(tracemalloc.begin)
            )
        )
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + b2mb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--peak_memory_upper_bound",
        type=float,
        default=None,
        help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.",
    )
    parser.add_argument(
        "--n_train",
        type=int,
        default=320,
        help="Number of training examples to use.",
    )
    parser.add_argument(
        "--n_val",
        type=int,
        default=160,
        help="Number of validation examples to use.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
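# For reference, b2mb truncates a byte count to whole mebibytes:
# >>> b2mb(5 * 2**20 + 123)
# 5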
from __future__ import annotations


def prime_factors(n: int) -> list[int]:
    """Return the prime factors of n in ascending order, by trial division."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors


if __name__ == "__main__":
    import doctest

    doctest.testmod()
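# For example:
# >>> prime_factors(360)
# [2, 2, 2, 3, 3, 5]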
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
_lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
@dataclass
class A_ ( _a ):
lowerCAmelCase__ = [
'no_inference',
'no_cuda',
'no_tpu',
'no_speed',
'no_memory',
'no_env_print',
'no_multi_process',
]
def __init__( self: Dict ,**__lowerCAmelCase: str ):
'''simple docstring'''
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
_lowerCamelCase : str = deprecated_arg[3:]
setattr(self ,__lowerCAmelCase ,not kwargs.pop(__lowerCAmelCase ) )
logger.warning(
F"""{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"""
F""" {positive_arg}={kwargs[positive_arg]}""" )
_lowerCamelCase : Optional[Any] = kwargs.pop("torchscript" ,self.torchscript )
_lowerCamelCase : List[str] = kwargs.pop("torch_xla_tpu_print_metrics" ,self.torch_xla_tpu_print_metrics )
_lowerCamelCase : Optional[int] = kwargs.pop("fp16_opt_level" ,self.fpaa_opt_level )
super().__init__(**__lowerCAmelCase )
lowerCAmelCase__ = field(default=_a , metadata={'help': 'Trace the models using torchscript'} )
lowerCAmelCase__ = field(default=_a , metadata={'help': 'Print Xla/PyTorch tpu metrics'} )
lowerCAmelCase__ = field(
default='O1' , metadata={
'help': (
'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. '
'See details at https://nvidia.github.io/apex/amp.html'
)
} , )
@cached_property
def _lowercase ( self: Tuple ):
'''simple docstring'''
requires_backends(self ,["torch"] )
logger.info("PyTorch: setting up devices" )
if not self.cuda:
_lowerCamelCase : Optional[int] = torch.device("cpu" )
_lowerCamelCase : Optional[int] = 0
elif is_torch_tpu_available():
_lowerCamelCase : Union[str, Any] = xm.xla_device()
_lowerCamelCase : Tuple = 0
else:
_lowerCamelCase : Tuple = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
_lowerCamelCase : Optional[Any] = torch.cuda.device_count()
return device, n_gpu
@property
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
return is_torch_tpu_available() and self.tpu
@property
def _lowercase ( self: Any ):
'''simple docstring'''
requires_backends(self ,["torch"] )
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def _lowercase ( self: Dict ):
'''simple docstring'''
requires_backends(self ,["torch"] )
return self._setup_devices[0]
@property
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
requires_backends(self ,["torch"] )
return self._setup_devices[1]
@property
def _lowercase ( self: Any ):
'''simple docstring'''
return self.n_gpu > 0 | 46 |
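# Illustrative usage; the `models`, `batch_sizes` and `sequence_lengths` fields
# are assumed to come from the BenchmarkArguments base class:
#   args = PyTorchBenchmarkArguments(models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128])
#   print(args.device, args.n_gpu)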
import numpy as np


def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    """Implement the hyperbolic tangent via the identity tanh(x) = 2 / (1 + e^(-2x)) - 1."""
    return (2 / (1 + np.exp(-2 * vector))) - 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
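# Sanity check (illustrative): the identity matches np.tanh.
# >>> v = np.array([-1.0, 0.0, 2.0])
# >>> bool(np.allclose(tangent_hyperbolic(v), np.tanh(v)))
# True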
import argparse
import gc
import json
import os
import shutil
import warnings

import torch

from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer


try:
    from transformers import LlamaTokenizerFast
except ImportError as e:
    warnings.warn(e)
    warnings.warn(
        "The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"
    )
    LlamaTokenizerFast = None

INTERMEDIATE_SIZE_MAP = {
    "7B": 11008,
    "13B": 13824,
    "30B": 17920,
    "65B": 22016,
    "70B": 28672,
}
NUM_SHARDS = {
    "7B": 1,
    "7Bf": 1,
    "13B": 2,
    "13Bf": 2,
    "30B": 4,
    "65B": 8,
    "70B": 8,
    "70Bf": 8,
}


def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)
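# For example, LLaMA-7B has hidden dim n = 4096, ffn_dim_multiplier = 1 and
# multiple_of = 256: int(8 * 4096 / 3) = 10922, which rounds up to the next
# multiple of 256, i.e. 11008 -- matching INTERMEDIATE_SIZE_MAP["7B"] above.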
def UpperCAmelCase__ ( lowerCamelCase_ : List[str] ):
with open(lowerCamelCase_ , 'r' ) as f:
return json.load(lowerCamelCase_ )
def UpperCAmelCase__ ( lowerCamelCase_ : int , lowerCamelCase_ : Dict ):
with open(lowerCamelCase_ , 'w' ) as f:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
def UpperCAmelCase__ ( lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Any , lowerCamelCase_ : Optional[int]=True ):
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
__a : Tuple = os.path.join(lowerCamelCase_ , 'tmp' )
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
__a : str = read_json(os.path.join(lowerCamelCase_ , 'params.json' ) )
__a : str = NUM_SHARDS[model_size]
__a : List[str] = params['n_layers']
__a : Optional[Any] = params['n_heads']
__a : Optional[Any] = n_heads // num_shards
__a : Optional[int] = params['dim']
__a : Union[str, Any] = dim // n_heads
__a : Dict = 10000.0
__a : Tuple = 1.0 / (base ** (torch.arange(0 , lowerCamelCase_ , 2 ).float() / dims_per_head))
if "n_kv_heads" in params:
__a : Tuple = params['n_kv_heads'] # for GQA / MQA
__a : Union[str, Any] = n_heads_per_shard // num_key_value_heads
__a : str = dim // num_key_value_heads
else: # compatibility with other checkpoints
__a : List[str] = n_heads
__a : List[Any] = n_heads_per_shard
__a : List[str] = dim
# permute for sliced rotary
def permute(lowerCamelCase_ : Dict , lowerCamelCase_ : Tuple=n_heads , lowerCamelCase_ : Any=dim , lowerCamelCase_ : List[str]=dim ):
return w.view(lowerCamelCase_ , dima // n_heads // 2 , 2 , lowerCamelCase_ ).transpose(1 , 2 ).reshape(lowerCamelCase_ , lowerCamelCase_ )
print(f'''Fetching all parameters from the checkpoint at {input_base_path}.''' )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
__a : Union[str, Any] = torch.load(os.path.join(lowerCamelCase_ , 'consolidated.00.pth' ) , map_location='cpu' )
else:
# Sharded
__a : Any = [
torch.load(os.path.join(lowerCamelCase_ , f'''consolidated.{i:02d}.pth''' ) , map_location='cpu' )
for i in range(lowerCamelCase_ )
]
__a : Any = 0
__a : List[str] = {'weight_map': {}}
for layer_i in range(lowerCamelCase_ ):
__a : Optional[int] = f'''pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin'''
if model_size == "7B":
# Unsharded
__a : Dict = {
f'''model.layers.{layer_i}.self_attn.q_proj.weight''': permute(
loaded[f'''layers.{layer_i}.attention.wq.weight'''] ),
f'''model.layers.{layer_i}.self_attn.k_proj.weight''': permute(
loaded[f'''layers.{layer_i}.attention.wk.weight'''] ),
f'''model.layers.{layer_i}.self_attn.v_proj.weight''': loaded[f'''layers.{layer_i}.attention.wv.weight'''],
f'''model.layers.{layer_i}.self_attn.o_proj.weight''': loaded[f'''layers.{layer_i}.attention.wo.weight'''],
f'''model.layers.{layer_i}.mlp.gate_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w1.weight'''],
f'''model.layers.{layer_i}.mlp.down_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w2.weight'''],
f'''model.layers.{layer_i}.mlp.up_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w3.weight'''],
f'''model.layers.{layer_i}.input_layernorm.weight''': loaded[f'''layers.{layer_i}.attention_norm.weight'''],
f'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[f'''layers.{layer_i}.ffn_norm.weight'''],
}
else:
# Sharded
# Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
__a : Optional[int] = {
f'''model.layers.{layer_i}.input_layernorm.weight''': loaded[0][
f'''layers.{layer_i}.attention_norm.weight'''
].clone(),
f'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[0][
f'''layers.{layer_i}.ffn_norm.weight'''
].clone(),
}
__a : List[Any] = permute(
torch.cat(
[
loaded[i][f'''layers.{layer_i}.attention.wq.weight'''].view(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
for i in range(lowerCamelCase_ )
] , dim=0 , ).reshape(lowerCamelCase_ , lowerCamelCase_ ) )
__a : List[str] = permute(
torch.cat(
[
loaded[i][f'''layers.{layer_i}.attention.wk.weight'''].view(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
for i in range(lowerCamelCase_ )
] , dim=0 , ).reshape(lowerCamelCase_ , lowerCamelCase_ ) , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , )
__a : List[Any] = torch.cat(
[
loaded[i][f'''layers.{layer_i}.attention.wv.weight'''].view(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
for i in range(lowerCamelCase_ )
] , dim=0 , ).reshape(lowerCamelCase_ , lowerCamelCase_ )
__a : Tuple = torch.cat(
[loaded[i][f'''layers.{layer_i}.attention.wo.weight'''] for i in range(lowerCamelCase_ )] , dim=1 )
__a : List[str] = torch.cat(
[loaded[i][f'''layers.{layer_i}.feed_forward.w1.weight'''] for i in range(lowerCamelCase_ )] , dim=0 )
__a : List[str] = torch.cat(
[loaded[i][f'''layers.{layer_i}.feed_forward.w2.weight'''] for i in range(lowerCamelCase_ )] , dim=1 )
__a : str = torch.cat(
[loaded[i][f'''layers.{layer_i}.feed_forward.w3.weight'''] for i in range(lowerCamelCase_ )] , dim=0 )
__a : Union[str, Any] = inv_freq
for k, v in state_dict.items():
__a : Any = filename
param_count += v.numel()
torch.save(lowerCamelCase_ , os.path.join(lowerCamelCase_ , lowerCamelCase_ ) )
__a : int = f'''pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin'''
if model_size == "7B":
# Unsharded
__a : Tuple = {
'model.embed_tokens.weight': loaded['tok_embeddings.weight'],
'model.norm.weight': loaded['norm.weight'],
'lm_head.weight': loaded['output.weight'],
}
else:
__a : Optional[int] = {
'model.norm.weight': loaded[0]['norm.weight'],
'model.embed_tokens.weight': torch.cat(
[loaded[i]['tok_embeddings.weight'] for i in range(lowerCamelCase_ )] , dim=1 ),
'lm_head.weight': torch.cat([loaded[i]['output.weight'] for i in range(lowerCamelCase_ )] , dim=0 ),
}
for k, v in state_dict.items():
__a : List[Any] = filename
param_count += v.numel()
torch.save(lowerCamelCase_ , os.path.join(lowerCamelCase_ , lowerCamelCase_ ) )
# Write configs
__a : Any = {'total_size': param_count * 2}
write_json(lowerCamelCase_ , os.path.join(lowerCamelCase_ , 'pytorch_model.bin.index.json' ) )
__a : Optional[int] = params['ffn_dim_multiplier'] if 'ffn_dim_multiplier' in params else 1
__a : Tuple = params['multiple_of'] if 'multiple_of' in params else 2_5_6
__a : Any = LlamaConfig(
hidden_size=lowerCamelCase_ , intermediate_size=compute_intermediate_size(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) , num_attention_heads=params['n_heads'] , num_hidden_layers=params['n_layers'] , rms_norm_eps=params['norm_eps'] , num_key_value_heads=lowerCamelCase_ , )
config.save_pretrained(lowerCamelCase_ )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print('Loading the checkpoint in a Llama model.' )
__a : List[str] = LlamaForCausalLM.from_pretrained(lowerCamelCase_ , torch_dtype=torch.floataa , low_cpu_mem_usage=lowerCamelCase_ )
# Avoid saving this as part of the config.
del model.config._name_or_path
print('Saving in the Transformers format.' )
model.save_pretrained(lowerCamelCase_ , safe_serialization=lowerCamelCase_ )
shutil.rmtree(lowerCamelCase_ )
def UpperCAmelCase__ ( lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[int] ):
# Initialize the tokenizer based on the `spm` model
__a : Dict = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
print(f'''Saving a {tokenizer_class.__name__} to {tokenizer_path}.''' )
__a : Optional[Any] = tokenizer_class(lowerCamelCase_ )
tokenizer.save_pretrained(lowerCamelCase_ )
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_dir",
        help="Location of LLaMA weights, which contains tokenizer.model and model folders",
    )
    parser.add_argument(
        "--model_size",
        choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"],
    )
    parser.add_argument(
        "--output_dir",
        help="Location to write HF model and tokenizer",
    )
    parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.")
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir,
            input_base_path=os.path.join(args.input_dir, args.model_size),
            model_size=args.model_size,
            safe_serialization=args.safe_serialization,
        )
    spm_path = os.path.join(args.input_dir, "tokenizer.model")
    write_tokenizer(args.output_dir, spm_path)
if __name__ == "__main__":
main()
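# Illustrative invocation (added for clarity; the script name and paths are
# hypothetical placeholders, not taken from this file):
#
#   python convert_llama_weights_to_hf.py \
#       --input_dir /path/to/downloaded/llama/weights \
#       --model_size 7B \
#       --output_dir ./llama-7b-hf \
#       --safe_serialization True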
| 47 |
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['''memory_attention''', '''encoder_attn'''],
['''attention''', '''attn'''],
['''/''', '''.'''],
['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
['''.LayerNorm.beta''', '''_layer_norm.bias'''],
['''r.layer_''', '''r.layers.'''],
['''output_proj''', '''out_proj'''],
['''ffn.dense_1.''', '''fc2.'''],
['''ffn.dense.''', '''fc1.'''],
['''ffn_layer_norm''', '''final_layer_norm'''],
['''kernel''', '''weight'''],
['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
['''embeddings.weights''', '''shared.weight'''],
]
def rename_state_dict_key(k: str) -> str:
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def lowercase ( __A : int="./ckpt/aeslc/model.ckpt-32000" ) -> Dict:
'''simple docstring'''
snake_case : Optional[Any] = tf.train.list_variables(__A )
snake_case : Union[str, Any] = {}
snake_case : List[str] = ["""Adafactor""", """global_step"""]
for name, shape in tqdm(__A , desc="""converting tf checkpoint to dict""" ):
snake_case : str = any(pat in name for pat in ignore_name )
if skip_key:
continue
snake_case : List[str] = tf.train.load_variable(__A , __A )
snake_case : Optional[Any] = array
return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str) -> None:
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
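# Illustrative invocation (a sketch; the checkpoint path mirrors the default of
# get_tf_weights_as_numpy above, and the script name is a placeholder):
#
#   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 ./pegasus/aeslc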
| 36 | 0 |
'''simple docstring'''
# Imports
import numpy as np
class IndexCalculation:
    """Calculates vegetation indices from red, green, blue, red-edge and NIR band matrices."""

    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)

    def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True

    def calculation(self, index="", red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
        funcs = {
            "ARVI2": self.arvaa,
            "CCCI": self.ccci,
            "CVI": self.cvi,
            "GLI": self.gli,
            "NDVI": self.ndvi,
            "BNDVI": self.bndvi,
            "redEdgeNDVI": self.red_edge_ndvi,
            "GNDVI": self.gndvi,
            "GBNDVI": self.gbndvi,
            "GRNDVI": self.grndvi,
            "RBNDVI": self.rbndvi,
            "PNDVI": self.pndvi,
            "ATSAVI": self.atsavi,
            "BWDRVI": self.bwdrvi,
            "CIgreen": self.ci_green,
            "CIrededge": self.ci_rededge,
            "CI": self.ci,
            "CTVI": self.ctvi,
            "GDVI": self.gdvi,
            "EVI": self.evi,
            "GEMI": self.gemi,
            "GOSAVI": self.gosavi,
            "GSAVI": self.gsavi,
            "Hue": self.hue,
            "IVI": self.ivi,
            "IPVI": self.ipvi,
            "I": self.i,
            "RVI": self.rvi,
            "MRVI": self.mrvi,
            "MSAVI": self.m_savi,
            "NormG": self.norm_g,
            "NormNIR": self.norm_nir,
            "NormR": self.norm_r,
            "NGRDI": self.ngrdi,
            "RI": self.ri,
            "S": self.s,
            "IF": self._if,
            "DVI": self.dvi,
            "TVI": self.tvi,
            "NDRE": self.ndre,
        }

        try:
            return funcs[index]()
        except KeyError:
            print("Index not in the list!")
            return False

    def arvaa(self):
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))

    def ccci(self):
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )

    def cvi(self):
        return self.nir * (self.red / (self.green**2))

    def gli(self):
        return (2 * self.green - self.red - self.blue) / (2 * self.green + self.red + self.blue)

    def ndvi(self):
        return (self.nir - self.red) / (self.nir + self.red)

    def bndvi(self):
        return (self.nir - self.blue) / (self.nir + self.blue)

    def red_edge_ndvi(self):
        return (self.redEdge - self.red) / (self.redEdge + self.red)

    def gndvi(self):
        return (self.nir - self.green) / (self.nir + self.green)

    def gbndvi(self):
        return (self.nir - (self.green + self.blue)) / (self.nir + (self.green + self.blue))

    def grndvi(self):
        return (self.nir - (self.green + self.red)) / (self.nir + (self.green + self.red))

    def rbndvi(self):
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))

    def pndvi(self):
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )

    def atsavi(self, a=0.08, b=1.22, x=0.03):
        return a * (
            (self.nir - a * self.red - b) / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )

    def bwdrvi(self):
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)

    def ci_green(self):
        return (self.nir / self.green) - 1

    def ci_rededge(self):
        return (self.nir / self.redEdge) - 1

    def ci(self):
        return (self.red - self.blue) / self.red

    def ctvi(self):
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))

    def gdvi(self):
        return self.nir - self.green

    def evi(self):
        return 2.5 * ((self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1))

    def gemi(self):
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)

    def gosavi(self, y=0.16):
        return (self.nir - self.green) / (self.nir + self.green + y)

    def gsavi(self, n=0.5):
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)

    def hue(self):
        return np.arctan(((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue))

    def ivi(self, a=None, b=None):
        return (self.nir - b) / (a * self.red)

    def ipvi(self):
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)

    def i(self):
        return (self.red + self.green + self.blue) / 30.5

    def rvi(self):
        return self.nir / self.red

    def mrvi(self):
        return (self.rvi() - 1) / (self.rvi() + 1)

    def m_savi(self):
        return (
            (2 * self.nir + 1) - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2

    def norm_g(self):
        return self.green / (self.nir + self.red + self.green)

    def norm_nir(self):
        return self.nir / (self.nir + self.red + self.green)

    def norm_r(self):
        return self.red / (self.nir + self.red + self.green)

    def ngrdi(self):
        return (self.green - self.red) / (self.green + self.red)

    def ri(self):
        return (self.red - self.green) / (self.red + self.green)

    def s(self):
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
        return (max_value - min_value) / max_value

    def _if(self):
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)

    def dvi(self):
        return self.nir / self.red

    def tvi(self):
        return (self.ndvi() + 0.5) ** (1 / 2)

    def ndre(self):
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
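# Minimal usage sketch (added for illustration; the 2x2 reflectance values are
# made-up sample data, not part of the original module):
#
#   ones = np.ones((2, 2))
#   cl = IndexCalculation(red=0.2 * ones, green=0.4 * ones, blue=0.1 * ones, nir=0.8 * ones)
#   print(cl.calculation("NDVI"))  # elementwise (nir - red) / (nir + red) -> 0.6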
| 48 |
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # placeholder: Lightning requires forward to be defined, but it is unused here
    def forward(self):
        pass
def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    # load base longformer model and wrap it in the LightningModule used for training
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--longformer_model''',
default=None,
type=str,
required=True,
help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
)
parser.add_argument(
'''--longformer_question_answering_ckpt_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch Lightning Checkpoint.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
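# Illustrative invocation (the checkpoint path and dump folder are hypothetical
# placeholders):
#
#   python convert_longformer_original_pytorch_lightning_to_pytorch.py \
#       --longformer_model longformer-base-4096 \
#       --longformer_question_answering_ckpt_path ./checkpoints/longformer_qa.ckpt \
#       --pytorch_dump_folder_path ./longformer-base-4096-finetuned-qa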
| 36 | 0 |
"""simple docstring"""
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    """Convert an RGB image to grayscale using the luma transform."""
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b
def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    """Threshold a grayscale image into a binary mask."""
    return (gray > 127) & (gray <= 255)
def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    """Morphological dilation of a binary image by a structuring element."""
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )
    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output
if __name__ == "__main__":
# read original image
    lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lena = np.array(Image.open(lena_path))
    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    pil_img = Image.fromarray(output).convert("RGB")
pil_img.save('result_dilation.png')
| 49 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)

OPTS = None
def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores


def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores


def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )


def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()


def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}


def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")


def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh


def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh
def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))
if __name__ == "__main__":
    OPTS = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('''Agg''')
import matplotlib.pyplot as plt
main()
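# Illustrative invocation (file names are placeholders for the SQuAD 2.0 dev set
# and a model's prediction/no-answer-probability files):
#
#   python evaluate_squad_v2.py dev-v2.0.json predictions.json \
#       --na-prob-file na_prob.json --out-file eval.json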
| 36 | 0 |
'''simple docstring'''
from math import factorial
def solution(n: int = 20) -> int:
    # middle entry of odd rows starting at row 3 is the solution for n = 1, 2, 3, ...
    n = 2 * n
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number.')
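# Worked example (added for clarity): for the default n = 20 the function
# returns the central binomial coefficient C(40, 20) = 40! / (20! * 20!)
# = 137846528820, the number of lattice paths through a 20x20 grid.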
| 50 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class MobileViTImageProcessor(BaseImageProcessor):
    """Image processor that resizes, center-crops, rescales and flips channel order (RGB -> BGR)."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PIL.Image.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize an image so that its shortest edge matches `size["shortest_edge"]`."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop an image to `(size["height"], size["width"])`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Rescale pixel values by `scale`."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def flip_channel_order(
        self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None
    ) -> np.ndarray:
        """Flip the color channels from RGB to BGR or vice versa."""
        return flip_channel_order(image, data_format=data_format)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[List[Tuple]] = None):
        """Convert model logits into per-image semantic segmentation maps."""
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
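# Minimal usage sketch (illustrative; assumes a PIL image bound to `image` --
# the checkpoint name below is an assumption, not taken from this file):
#
#   processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-small")
#   inputs = processor(images=image, return_tensors="pt")  # BatchFeature with "pixel_values"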
| 36 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
    from transformers import (
        AutoConfig,
        BertConfig,
        GPT2Config,
        T5Config,
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForPreTraining,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelWithLMHead,
        TFBertForMaskedLM,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertModel,
        TFGPT2LMHeadModel,
        TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
    )
    from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
    from transformers import (
        AutoModel,
        AutoModelForCausalLM,
        AutoModelForMaskedLM,
        AutoModelForPreTraining,
        AutoModelForQuestionAnswering,
        AutoModelForSeq2SeqLM,
        AutoModelForSequenceClassification,
        AutoModelWithLMHead,
        BertForMaskedLM,
        BertForPreTraining,
        BertForQuestionAnswering,
        BertForSequenceClassification,
        BertModel,
        GPT2LMHeadModel,
        RobertaForMaskedLM,
        T5ForConditionalGeneration,
    )
@is_pt_tf_cross_test
class TFPTAutoModelTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModel.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertModel)

            model = AutoModel.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertModel)
    @slow
    def test_model_for_pretraining_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForPreTraining.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForPreTraining)

            model = AutoModelForPreTraining.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForPreTraining)
    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPT2Config)

            model = TFAutoModelForCausalLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPT2LMHeadModel)

            model = AutoModelForCausalLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, GPT2LMHeadModel)
    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelWithLMHead.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)
    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelForMaskedLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)
    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)

            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFT5ForConditionalGeneration)

            model = AutoModelForSeq2SeqLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, T5ForConditionalGeneration)
    @slow
    def test_sequence_classification_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

            model = AutoModelForSequenceClassification.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForSequenceClassification)
    @slow
    def test_question_answering_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

            model = AutoModelForQuestionAnswering.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForQuestionAnswering)
    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, BertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, RobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
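# These cross-framework tests only run when the PyTorch<->TensorFlow cross-test
# flag is set; a typical (illustrative) invocation would be:
#
#   RUN_PT_TF_CROSS_TESTS=1 pytest tests/test_modeling_tf_pytorch.py -k from_pretrained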
| 51 |
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer
def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Save a randomly initialized version of a model using a pretrained config."""
    cfg = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(cfg)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
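# Example invocation via the fire CLI (the script name, model identifier and
# output directory are hypothetical placeholders):
#
#   python save_randomly_initialized_model.py t5-small ./t5-random-init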
| 36 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_glpn"] = ["GLPNFeatureExtractor"]
    _import_structure["image_processing_glpn"] = ["GLPNImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_glpn"] = [
'''GLPN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GLPNForDepthEstimation''',
'''GLPNLayer''',
'''GLPNModel''',
'''GLPNPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config(PretrainedConfig):
    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 36 | 0 |
speed_chart: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 3.6,
    "mph": 1.609344,
    "knot": 1.852,
}

speed_chart_inverse: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 0.277777778,
    "mph": 0.621371192,
    "knot": 0.539956803,
}
def convert_speed(speed: float, unit_from: str, unit_to: str) -> float:
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"
            f"Valid values are: {', '.join(speed_chart_inverse)}"
        )
        raise ValueError(msg)
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3)
if __name__ == "__main__":
import doctest
doctest.testmod()
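# Worked example (added for clarity): convert_speed(100, "km/h", "m/s") computes
# round(100 * speed_chart["km/h"] * speed_chart_inverse["m/s"], 3)
# = round(100 * 1.0 * 0.277777778, 3) = 27.778.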
| 53 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "edbeeching/decision-transformer-gym-hopper-medium": (
        "https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
    ),
    # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class DecisionTransformerConfig(PretrainedConfig):
    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
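# Usage sketch (illustrative values, not from this file): a config for an
# environment with 11-dimensional observations and 3-dimensional actions could
# be built as
#
#   config = DecisionTransformerConfig(state_dim=11, act_dim=3)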
| 36 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/electra-small-generator""": (
"""https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"""
),
"""google/electra-base-generator""": """https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt""",
"""google/electra-large-generator""": (
"""https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"""
),
"""google/electra-small-discriminator""": (
"""https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"""
),
"""google/electra-base-discriminator""": (
"""https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"""
),
"""google/electra-large-discriminator""": (
"""https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""google/electra-small-generator""": (
"""https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"""
),
"""google/electra-base-generator""": (
"""https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"""
),
"""google/electra-large-generator""": (
"""https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"""
),
"""google/electra-small-discriminator""": (
"""https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"""
),
"""google/electra-base-discriminator""": (
"""https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"""
),
"""google/electra-large-discriminator""": (
"""https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""google/electra-small-generator""": 512,
"""google/electra-base-generator""": 512,
"""google/electra-large-generator""": 512,
"""google/electra-small-discriminator""": 512,
"""google/electra-base-discriminator""": 512,
"""google/electra-large-discriminator""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""google/electra-small-generator""": {"""do_lower_case""": True},
"""google/electra-base-generator""": {"""do_lower_case""": True},
"""google/electra-large-generator""": {"""do_lower_case""": True},
"""google/electra-small-discriminator""": {"""do_lower_case""": True},
"""google/electra-base-discriminator""": {"""do_lower_case""": True},
"""google/electra-large-discriminator""": {"""do_lower_case""": True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 54 |
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
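# Hedged usage sketch: any scheduler above can replace a pipeline's default
# sampler through its serialized config (the pipeline id is illustrative):
#
#     from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
#     pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#     pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)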
| 36 | 0 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 55 |
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
"""en""": """Machine learning is great, isn't it?""",
"""ru""": """Машинное обучение - это здорово, не так ли?""",
"""de""": """Maschinelles Lernen ist großartig, oder?""",
}
    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
"""ru-en""": ["""[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)""", """39.20"""],
"""en-ru""": ["""[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)""", """33.47"""],
"""en-de""": ["""[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)""", """42.83"""],
"""de-en""": ["""[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)""", """41.35"""],
}
    pair = f"{src_lang}-{tgt_lang}"
    readme = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation.
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn't support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR's WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
"""
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 36 | 0 |
'''simple docstring'''
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/owlvit-base-patch32": "https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json",
"google/owlvit-base-patch16": "https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json",
"google/owlvit-large-patch14": "https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json",
}
class OwlViTTextConfig(PretrainedConfig):
    model_type = "owlvit_text_model"

    def __init__(
        self,
        vocab_size=49408,
        hidden_size=512,
        intermediate_size=2048,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        pad_token_id=0,
        bos_token_id=49406,
        eos_token_id=49407,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)


class OwlViTVisionConfig(PretrainedConfig):
    model_type = "owlvit_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=768,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)


class OwlViTConfig(PretrainedConfig):
    model_type = "owlvit"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=512,
        logit_scale_init_value=2.6592,
        return_dict=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the OwlViTTextConfig with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values.")
        self.text_config = OwlViTTextConfig(**text_config)
        self.vision_config = OwlViTVisionConfig(**vision_config)
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)

    @classmethod
    def from_text_vision_configs(cls, text_config: Dict, vision_config: Dict, **kwargs):
        config_dict = {}
        config_dict["text_config"] = text_config
        config_dict["vision_config"] = vision_config
        return cls.from_dict(config_dict, **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class OwlViTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("attention_mask", {0: "batch", 1: "sequence"}),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("logits_per_image", {0: "batch"}),
                ("logits_per_text", {0: "batch"}),
                ("text_embeds", {0: "batch"}),
                ("image_embeds", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework
        )
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor, batch_size=batch_size, framework=framework
        )
        return {**text_input_dict, **image_input_dict}

    @property
    def default_onnx_opset(self) -> int:
        return 14
| 56 |
INSTALL_CONTENT = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 36 | 0 |
__version__ = "0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
        KDPM2AncestralDiscreteScheduler,
        KDPM2DiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
        AltDiffusionImg2ImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
        IFImg2ImgPipeline,
        IFImg2ImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
        KandinskyImg2ImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
        KandinskyV22ControlnetImg2ImgPipeline,
        KandinskyV22ControlnetPipeline,
        KandinskyV22Img2ImgPipeline,
        KandinskyV22InpaintPipeline,
        KandinskyV22Pipeline,
        KandinskyV22PriorEmb2EmbPipeline,
        KandinskyV22PriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
        ShapEImg2ImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
        StableDiffusionControlNetImg2ImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
        StableDiffusionDepth2ImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
        StableDiffusionImg2ImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
        StableDiffusionInstructPix2PixPipeline,
StableDiffusionLatentUpscalePipeline,
        StableDiffusionLDM3DPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
        StableDiffusionPix2PixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
        StableUnCLIPImg2ImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
    from .pipelines import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
        OnnxStableDiffusionImg2ImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
    from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
        FlaxStableDiffusionImg2ImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor | 57 |
import warnings
from ..trainer import Trainer
from ..utils import logging
logger = logging.get_logger(__name__)
class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
| 36 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan
    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)
        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")
            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)
        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
| 58 |
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ["text", "image", "audio"]
def create_inputs(input_types: List[str]):
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")
    return inputs
def output_types(outputs: List):
    output_types = []
    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")
    return output_types
@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))
        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)
        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)
        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]
        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)
        if not isinstance(outputs, list):
            outputs = [outputs]
        self.assertEqual(len(outputs), len(self.tool.outputs))
        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_types_inputs(self):
        inputs = create_inputs(self.tool.inputs)
        _inputs = []
        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))
        # Should not raise an error
        outputs = self.tool(*_inputs)
        if not isinstance(outputs, list):
            outputs = [outputs]
        self.assertEqual(len(outputs), len(self.tool.outputs))
| 36 | 0 |
from __future__ import annotations
class IIRFilter:
    def __init__(self, order: int) -> None:
        self.order = order
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )
        if len(b_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        # Shift the delay lines and store the newest input/output.
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
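if __name__ == "__main__":
    # Hedged usage sketch: a first-order low-pass section
    # y[n] = 0.5 * x[n] + 0.5 * y[n-1], written in the a/b convention above.
    flt = IIRFilter(1)
    flt.set_coefficients([1.0, -0.5], [0.5, 0.0])
    print([round(flt.process(s), 4) for s in [1.0, 1.0, 1.0, 1.0]])
    # -> [0.5, 0.75, 0.875, 0.9375], stepping toward the input level 1.0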
| 59 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize("""path""" , ["""paws""", """csv"""] )
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings("""ignore:inspect_metric is deprecated:FutureWarning""" )
@pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" )
@pytest.mark.parametrize("""path""" , ["""accuracy"""] )
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
"""path, config_name, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
"""path, expected""" , [
("""squad""", """plain_text"""),
("""acronym_identification""", """default"""),
("""lhoestq/squad""", """plain_text"""),
("""lhoestq/test""", """default"""),
("""lhoestq/demo1""", """lhoestq--demo1"""),
("""dalle-mini/wit""", """dalle-mini--wit"""),
] , )
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names
@pytest.mark.parametrize(
"""path, expected_configs, expected_splits_in_first_config""" , [
("""squad""", ["""plain_text"""], ["""train""", """validation"""]),
("""dalle-mini/wit""", ["""dalle-mini--wit"""], ["""train"""]),
("""paws""", ["""labeled_final""", """labeled_swap""", """unlabeled_final"""], ["""train""", """test""", """validation"""]),
] , )
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
"""path, expected_config, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
| 36 | 0 |
import argparse
CUSTOM_JS_FILE = "docs/source/_static/js/custom.js"


def update_custom_js(version):
    """Update the stable version and the version table in the docs' custom.js file."""
    with open(CUSTOM_JS_FILE, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0
    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'
    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1
    # We go until the end
    while not lines[index].startswith("}"):
        index += 1
    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'
    with open(CUSTOM_JS_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
    update_custom_js(args.version)
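# Hedged sketch of what the pass does to custom.js (contents illustrative):
#
#     before: const stableVersion = "v4.29.0"
#             const versionMapping = {
#                 "": "doc",
#                 "v4.29.0": "v4.29.0",
#             }
#     after update_custom_js("4.30.0"):
#             const stableVersion = "v4.30.0"
#             const versionMapping = {
#                 "": "doc",
#                 "v4.29.0": "v4.29.0",
#                 "v4.30.0": "v4.30.0",
#             }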
| 60 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 36 | 0 |
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
            "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    def test_full_tokenizer(self):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)
        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]
                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token, so the common test does not apply
        pass


@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest):
    """Runs the same tests with `ftfy` and `spacy` installed, which changes pre-tokenization."""

    pass
| 61 |
from __future__ import annotations
def average(nums: list) -> float:
    """Return the arithmetic mean of a non-empty list of numbers."""
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 36 | 0 |
def gray_code(bit_count):
    """Return the `bit_count`-bit reflected Gray code as a list of integers."""
    if bit_count < 0:
        raise ValueError("The given input must be positive")
    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)
    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)
    return sequence


def gray_code_sequence_string(bit_count):
    """Build the Gray code recursively: prefix the (n-1)-bit sequence with 0,
    then its reverse with 1."""
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]
    seq_len = 1 << bit_count  # defines the length of the sequence; 1 << n == 2**n
    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)
    sequence = []
    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        sequence.append("0" + smaller_sequence[i])
    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        sequence.append("1" + smaller_sequence[i])
    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
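    # Example: the 3-bit reflected Gray code; consecutive entries differ in one bit.
    print(gray_code(3))  # [0, 1, 3, 2, 6, 7, 5, 4]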
| 62 |
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
__lowercase : List[str] = logging.get_logger(__name__)
class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuration of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"
            )
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")
        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config, decoder_config, **kwargs):
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self):
        return 1e-4

    @property
    def outputs(self):
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})


class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None):
        import torch

        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)
        return common_inputs


class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        pass

    def get_encoder_config(self, encoder_config):
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(self, encoder_config, decoder_config, feature: str = "default"):
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
| 36 | 0 |
INSTALL_CONTENT = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 63 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_config(model_name):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    conv_layer = "std_conv" if "bit" in model_name else False
    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer, num_labels=1000, id2label=id2label, label2id=label2id,
    )
    return config
def rename_key(name):
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name
    return name
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_config(model_name)
    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model and remap keys to the HF layout
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val
    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)
    # create image processor from the timm data config
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms
    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )
    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits
    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''resnetv2_50x1_bitm''',
type=str,
help='''Name of the BiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model to the hub.''',
)
    args = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 36 | 0 |
from PIL import Image
def mean_threshold(image: Image) -> Image:
    """Binarize a grayscale PIL image around its mean pixel value (in place)."""
    height, width = image.size
    mean = 0
    pixels = image.load()
    for i in range(width):
        for j in range(height):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height
    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image


if __name__ == "__main__":
    image = mean_threshold(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
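    # Tiny self-contained check on a synthetic 4x4 grayscale gradient (no file
    # I/O; values are illustrative): the mean is 120, so pixels above it
    # become 255 and the rest 0.
    demo = Image.new("L", (4, 4))
    demo.putdata(list(range(0, 256, 16)))
    print(list(mean_threshold(demo).getdata()))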
| 64 |
import os
import pytest
from attr import dataclass
os.environ["AWS_DEFAULT_REGION"] = "us-east-1"  # defaults region
@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
'''task_name''': '''mnli''',
'''per_device_train_batch_size''': 1_6,
'''per_device_eval_batch_size''': 1_6,
'''do_train''': True,
'''do_eval''': True,
'''do_predict''': True,
'''output_dir''': '''/opt/ml/model''',
'''overwrite_output_dir''': True,
'''max_steps''': 5_0_0,
'''save_steps''': 5_5_0_0,
}
__lowerCamelCase : List[str] = {**hyperparameters, '''max_steps''': 1_0_0_0}
@property
def snake_case_ ( self ):
'''simple docstring'''
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def snake_case_ ( self ):
'''simple docstring'''
return F"""{self.framework}-transfromers-test"""
@property
def snake_case_ ( self ):
'''simple docstring'''
return F"""./tests/sagemaker/scripts/{self.framework}"""
@property
def snake_case_ ( self ):
'''simple docstring'''
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="""class""" )
def lowercase ( __A : List[str] ) -> List[str]:
'''simple docstring'''
snake_case : Optional[Any] = SageMakerTestEnvironment(framework=request.cls.framework )
| 36 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Approximate the area under fnc on [x_start, x_end] with the trapezoidal rule."""
    xa = x_start
    fxa = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        xb = (x_end - x_start) / steps + xa
        fxb = fnc(xb)
        area += abs(fxb + fxa) * (xb - xa) / 2
        # Increment step
        xa = xb
        fxa = fxb
    return area
if __name__ == "__main__":
    def f(x: float) -> float:
        return x**3 + x**2
    print('f(x) = x^3 + x^2')
    print('The area between the curve, x = -5, x = 5 and the x axis is:')
    i = 10
    while i <= 100_000:
        print(F"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
| 65 |
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('''To use the rich extension, install rich with `pip install rich`''')
| 36 | 0 |
from __future__ import annotations
UpperCamelCase = tuple[int, int, int]
UpperCamelCase = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
UpperCamelCase = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
# -------------------------- default selection --------------------------
# rotors --------------------------
UpperCamelCase = "EGZWVONAHDCLFQMSIPJBYUKXTR"
UpperCamelCase = "FOBHMDKEXQNRAULPGSJVTYICZW"
UpperCamelCase = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
UpperCamelCase = {
"A": "N",
"N": "A",
"B": "O",
"O": "B",
"C": "P",
"P": "C",
"D": "Q",
"Q": "D",
"E": "R",
"R": "E",
"F": "S",
"S": "F",
"G": "T",
"T": "G",
"H": "U",
"U": "H",
"I": "V",
"V": "I",
"J": "W",
"W": "J",
"K": "X",
"X": "K",
"L": "Y",
"Y": "L",
"M": "Z",
"Z": "M",
}
# -------------------------- extra rotors --------------------------
UpperCamelCase = "RMDJXFUWGISLHVTCQNKYPBEZOA"
UpperCamelCase = "SGLCPQWZHKXAREONTFBVIYJUDM"
UpperCamelCase = "HVSICLTYKQUBXDWAJZOMFGPREN"
UpperCamelCase = "RZWQHFMVDBKICJLNTUXAGYPSOE"
UpperCamelCase = "LFKIJODBEGAMQPXVUHYSTCZRWN"
UpperCamelCase = "KOAEGVDHXPQZMLFTYWJNBRCIUS"
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
# Checks if there are 3 unique rotors
if (unique_rotsel := len(set(SCREAMING_SNAKE_CASE ) )) < 3:
_lowercase : Optional[int] = F"""Please use 3 unique rotors (not {unique_rotsel})"""
raise Exception(SCREAMING_SNAKE_CASE )
# Checks if rotor positions are valid
_lowercase , _lowercase , _lowercase : int = rotpos
if not 0 < rotorposa <= len(SCREAMING_SNAKE_CASE ):
_lowercase : Dict = F"""First rotor position is not within range of 1..26 ({rotorposa}"""
raise ValueError(SCREAMING_SNAKE_CASE )
if not 0 < rotorposa <= len(SCREAMING_SNAKE_CASE ):
_lowercase : int = F"""Second rotor position is not within range of 1..26 ({rotorposa})"""
raise ValueError(SCREAMING_SNAKE_CASE )
if not 0 < rotorposa <= len(SCREAMING_SNAKE_CASE ):
_lowercase : str = F"""Third rotor position is not within range of 1..26 ({rotorposa})"""
raise ValueError(SCREAMING_SNAKE_CASE )
# Validates string and returns dict
_lowercase : Tuple = _plugboard(SCREAMING_SNAKE_CASE )
return rotpos, rotsel, pbdict
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> dict[str, str]:
# tests the input string if it
# a) is type string
# b) has even length (so pairs can be made)
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
_lowercase : Optional[int] = F"""Plugboard setting isn't type string ({type(SCREAMING_SNAKE_CASE )})"""
raise TypeError(SCREAMING_SNAKE_CASE )
elif len(SCREAMING_SNAKE_CASE ) % 2 != 0:
_lowercase : Optional[int] = F"""Odd number of symbols ({len(SCREAMING_SNAKE_CASE )})"""
raise Exception(SCREAMING_SNAKE_CASE )
elif pbstring == "":
return {}
    pbstring = pbstring.replace(' ' , '' )  # str.replace returns a new string; keep the result
# Checks if all characters are unique
_lowercase : Dict = set()
for i in pbstring:
if i not in abc:
_lowercase : str = F"""'{i}' not in list of symbols"""
raise Exception(SCREAMING_SNAKE_CASE )
elif i in tmppbl:
_lowercase : int = F"""Duplicate symbol ({i})"""
raise Exception(SCREAMING_SNAKE_CASE )
else:
tmppbl.add(SCREAMING_SNAKE_CASE )
del tmppbl
# Created the dictionary
_lowercase : Optional[Any] = {}
for j in range(0 , len(SCREAMING_SNAKE_CASE ) - 1 , 2 ):
_lowercase : Dict = pbstring[j + 1]
_lowercase : Union[str, Any] = pbstring[j]
return pb
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = (rotora, rotora, rotora) , SCREAMING_SNAKE_CASE = "" , ) -> str:
_lowercase : List[str] = text.upper()
_lowercase , _lowercase , _lowercase : List[str] = _validator(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , plugb.upper() )
_lowercase , _lowercase , _lowercase : Optional[int] = rotor_position
_lowercase , _lowercase , _lowercase : Union[str, Any] = rotor_selection
rotorposa -= 1
rotorposa -= 1
rotorposa -= 1
_lowercase : Optional[int] = []
# encryption/decryption process --------------------------
for symbol in text:
if symbol in abc:
# 1st plugboard --------------------------
if symbol in plugboard:
_lowercase : Dict = plugboard[symbol]
# rotor ra --------------------------
_lowercase : Optional[Any] = abc.index(SCREAMING_SNAKE_CASE ) + rotorposa
_lowercase : Union[str, Any] = rotora[index % len(SCREAMING_SNAKE_CASE )]
# rotor rb --------------------------
_lowercase : Tuple = abc.index(SCREAMING_SNAKE_CASE ) + rotorposa
_lowercase : str = rotora[index % len(SCREAMING_SNAKE_CASE )]
# rotor rc --------------------------
_lowercase : List[Any] = abc.index(SCREAMING_SNAKE_CASE ) + rotorposa
_lowercase : List[str] = rotora[index % len(SCREAMING_SNAKE_CASE )]
# reflector --------------------------
# this is the reason you don't need another machine to decipher
_lowercase : List[str] = reflector[symbol]
# 2nd rotors
_lowercase : List[str] = abc[rotora.index(SCREAMING_SNAKE_CASE ) - rotorposa]
_lowercase : Tuple = abc[rotora.index(SCREAMING_SNAKE_CASE ) - rotorposa]
_lowercase : Dict = abc[rotora.index(SCREAMING_SNAKE_CASE ) - rotorposa]
# 2nd plugboard
if symbol in plugboard:
_lowercase : int = plugboard[symbol]
# moves/resets rotor positions
rotorposa += 1
if rotorposa >= len(SCREAMING_SNAKE_CASE ):
_lowercase : Any = 0
rotorposa += 1
if rotorposa >= len(SCREAMING_SNAKE_CASE ):
_lowercase : int = 0
rotorposa += 1
if rotorposa >= len(SCREAMING_SNAKE_CASE ):
_lowercase : Any = 0
# else:
# pass
# Error could be also raised
# raise ValueError(
# 'Invalid symbol('+repr(symbol)+')')
result.append(SCREAMING_SNAKE_CASE )
return "".join(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
UpperCamelCase = "This is my Python script that emulates the Enigma machine from WWII."
UpperCamelCase = (1, 1, 1)
UpperCamelCase = "pictures"
UpperCamelCase = (rotora, rotora, rotora)
UpperCamelCase = enigma(message, rotor_pos, rotor_sel, pb)
print("Encrypted message:", en)
print("Decrypted message:", enigma(en, rotor_pos, rotor_sel, pb))
| 66 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
__lowercase : Any = logging.getLogger(__name__)
@dataclass(frozen=snake_case )
class _A :
'''simple docstring'''
__lowerCamelCase : str
__lowerCamelCase : str
__lowerCamelCase : Optional[str] = None
__lowerCamelCase : Optional[str] = None
__lowerCamelCase : Optional[str] = None
@dataclass(frozen=snake_case )
class _A :
'''simple docstring'''
__lowerCamelCase : List[int]
__lowerCamelCase : Optional[List[int]] = None
__lowerCamelCase : Optional[List[int]] = None
__lowerCamelCase : Optional[Union[int, float]] = None
__lowerCamelCase : Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : List[InputFeatures]
def __init__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_=False ,SCREAMING_SNAKE_CASE_ = False ,):
'''simple docstring'''
snake_case : str = hans_processors[task]()
snake_case : str = os.path.join(
SCREAMING_SNAKE_CASE_ ,"""cached_{}_{}_{}_{}""".format(
"""dev""" if evaluate else """train""" ,tokenizer.__class__.__name__ ,str(SCREAMING_SNAKE_CASE_ ) ,SCREAMING_SNAKE_CASE_ ,) ,)
snake_case : Dict = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
snake_case , snake_case : List[Any] = label_list[2], label_list[1]
snake_case : List[Any] = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
snake_case : Any = cached_features_file + """.lock"""
with FileLock(SCREAMING_SNAKE_CASE_ ):
if os.path.exists(SCREAMING_SNAKE_CASE_ ) and not overwrite_cache:
logger.info(F"""Loading features from cached file {cached_features_file}""" )
snake_case : int = torch.load(SCREAMING_SNAKE_CASE_ )
else:
logger.info(F"""Creating features from dataset file at {data_dir}""" )
snake_case : Union[str, Any] = (
processor.get_dev_examples(SCREAMING_SNAKE_CASE_ ) if evaluate else processor.get_train_examples(SCREAMING_SNAKE_CASE_ )
)
logger.info("""Training examples: %s""" ,len(SCREAMING_SNAKE_CASE_ ) )
snake_case : Dict = hans_convert_examples_to_features(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
logger.info("""Saving features into cached file %s""" ,SCREAMING_SNAKE_CASE_ )
torch.save(self.features ,SCREAMING_SNAKE_CASE_ )
def __len__( self ):
'''simple docstring'''
return len(self.features )
def __getitem__( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return self.features[i]
def snake_case_ ( self ):
'''simple docstring'''
return self.label_list
if is_tf_available():
import tensorflow as tf
class _A :
'''simple docstring'''
__lowerCamelCase : List[InputFeatures]
def __init__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = 128 ,SCREAMING_SNAKE_CASE_=False ,SCREAMING_SNAKE_CASE_ = False ,):
'''simple docstring'''
snake_case : Any = hans_processors[task]()
snake_case : List[str] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
snake_case , snake_case : int = label_list[2], label_list[1]
snake_case : List[str] = label_list
snake_case : int = processor.get_dev_examples(SCREAMING_SNAKE_CASE_ ) if evaluate else processor.get_train_examples(SCREAMING_SNAKE_CASE_ )
snake_case : Any = hans_convert_examples_to_features(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) ,desc="""convert examples to features""" ):
if ex_index % 10000 == 0:
logger.info("""Writing example %d of %d""" % (ex_index, len(SCREAMING_SNAKE_CASE_ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
snake_case : Any = tf.data.Dataset.from_generator(
SCREAMING_SNAKE_CASE_ ,(
{
"""example_id""": tf.intaa,
"""input_ids""": tf.intaa,
"""attention_mask""": tf.intaa,
"""token_type_ids""": tf.intaa,
},
tf.intaa,
) ,(
{
"""example_id""": tf.TensorShape([] ),
"""input_ids""": tf.TensorShape([None, None] ),
"""attention_mask""": tf.TensorShape([None, None] ),
"""token_type_ids""": tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) ,)
def snake_case_ ( self ):
'''simple docstring'''
return self.dataset
def __len__( self ):
'''simple docstring'''
return len(self.features )
def __getitem__( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return self.features[i]
def snake_case_ ( self ):
'''simple docstring'''
return self.label_list
class _A ( snake_case ):
'''simple docstring'''
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return self._create_examples(self._read_tsv(os.path.join(SCREAMING_SNAKE_CASE_ ,"""heuristics_train_set.txt""" ) ) ,"""train""" )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return self._create_examples(self._read_tsv(os.path.join(SCREAMING_SNAKE_CASE_ ,"""heuristics_evaluation_set.txt""" ) ) ,"""dev""" )
def snake_case_ ( self ):
'''simple docstring'''
return ["contradiction", "entailment", "neutral"]
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : List[str] = []
for i, line in enumerate(SCREAMING_SNAKE_CASE_ ):
if i == 0:
continue
snake_case : Any = """%s-%s""" % (set_type, line[0])
snake_case : Optional[int] = line[5]
snake_case : Union[str, Any] = line[6]
snake_case : Optional[Any] = line[7][2:] if line[7].startswith("""ex""" ) else line[7]
snake_case : Dict = line[0]
examples.append(InputExample(guid=SCREAMING_SNAKE_CASE_ ,text_a=SCREAMING_SNAKE_CASE_ ,text_b=SCREAMING_SNAKE_CASE_ ,label=SCREAMING_SNAKE_CASE_ ,pairID=SCREAMING_SNAKE_CASE_ ) )
return examples
def lowercase ( __A : List[InputExample] , __A : List[str] , __A : int , __A : PreTrainedTokenizer , ) -> Tuple:
'''simple docstring'''
snake_case : List[Any] = {label: i for i, label in enumerate(__A )}
snake_case : Union[str, Any] = []
for ex_index, example in tqdm.tqdm(enumerate(__A ) , desc="""convert examples to features""" ):
if ex_index % 1_0000 == 0:
logger.info("""Writing example %d""" % (ex_index) )
snake_case : Union[str, Any] = tokenizer(
example.text_a , example.text_b , add_special_tokens=__A , max_length=__A , padding="""max_length""" , truncation=__A , return_overflowing_tokens=__A , )
snake_case : Tuple = label_map[example.label] if example.label in label_map else 0
snake_case : Tuple = int(example.pairID )
features.append(InputFeatures(**__A , label=__A , pairID=__A ) )
for i, example in enumerate(examples[:5] ):
logger.info("""*** Example ***""" )
logger.info(f"""guid: {example}""" )
logger.info(f"""features: {features[i]}""" )
return features
__lowercase : Dict = {
'''hans''': 3,
}
__lowercase : Union[str, Any] = {
'''hans''': HansProcessor,
}
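# --- illustrative flow (sketch; the class and function names above are obfuscated) ---
# 1. a processor reads heuristics_{train,evaluation}_set.txt into InputExample objects
# 2. examples are tokenized to max_length with padding and labels mapped via label_list
#    (with the RoBERTa/BART label-swap hack applied first)
# 3. the torch Dataset caches the features to disk under a FileLock so that only one
#    distributed worker builds them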
| 36 | 0 |
from math import pi
def arc_length(angle: int, radius: int) -> float:
    # length of a circular arc subtending `angle` degrees on a circle of radius `radius`
    return 2 * pi * radius * (angle / 360)
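# e.g. arc_length(90, 10) == 5 * pi ≈ 15.708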
if __name__ == "__main__":
    print(arc_length(90, 10))
 | 67 |
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """Return the prime factorisation of n via trial division.

    >>> prime_factors(100)
    [2, 2, 5, 5]
    """
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
| 36 | 0 |
def lowercase__ ( A_: int , A_: list ) -> Any:
"""simple docstring"""
_enforce_args(A_ , A_ )
if n == 0:
return 0
__UpperCAmelCase =float("""-inf""" )
for i in range(1 , n + 1 ):
__UpperCAmelCase =max(
A_ , prices[i - 1] + naive_cut_rod_recursive(n - i , A_ ) )
return max_revue
def lowercase__ ( A_: int , A_: list ) -> List[str]:
"""simple docstring"""
_enforce_args(A_ , A_ )
__UpperCAmelCase =[float("""-inf""" ) for _ in range(n + 1 )]
return _top_down_cut_rod_recursive(A_ , A_ , A_ )
def lowercase__ ( A_: int , A_: list , A_: list ) -> Dict:
"""simple docstring"""
if max_rev[n] >= 0:
return max_rev[n]
elif n == 0:
return 0
else:
__UpperCAmelCase =float("""-inf""" )
for i in range(1 , n + 1 ):
__UpperCAmelCase =max(
A_ , prices[i - 1] + _top_down_cut_rod_recursive(n - i , A_ , A_ ) , )
__UpperCAmelCase =max_revenue
return max_rev[n]
def lowercase__ ( A_: int , A_: list ) -> Optional[int]:
"""simple docstring"""
_enforce_args(A_ , A_ )
# length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
# length 0.
__UpperCAmelCase =[float("""-inf""" ) for _ in range(n + 1 )]
__UpperCAmelCase =0
for i in range(1 , n + 1 ):
__UpperCAmelCase =max_rev[i]
for j in range(1 , i + 1 ):
__UpperCAmelCase =max(A_ , prices[j - 1] + max_rev[i - j] )
__UpperCAmelCase =max_revenue_i
return max_rev[n]
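# the bottom-up variant above runs in O(n^2) time and O(n) space: each rod length i
# considers every possible first cut j in 1..i.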
def lowercase__ ( A_: int , A_: list ) -> Union[str, Any]:
"""simple docstring"""
if n < 0:
__UpperCAmelCase =F'''n must be greater than or equal to 0. Got n = {n}'''
raise ValueError(A_ )
if n > len(A_ ):
__UpperCAmelCase =(
"""Each integral piece of rod must have a corresponding price. """
F'''Got n = {n} but length of prices = {len(A_ )}'''
)
raise ValueError(A_ )
def lowercase__ ( ) -> int:
"""simple docstring"""
__UpperCAmelCase =[6, 10, 12, 15, 20, 23]
__UpperCAmelCase =len(A_ )
# the best revenue comes from cutting the rod into 6 pieces, each
# of length 1 resulting in a revenue of 6 * 6 = 36.
__UpperCAmelCase =36
__UpperCAmelCase =top_down_cut_rod(A_ , A_ )
__UpperCAmelCase =bottom_up_cut_rod(A_ , A_ )
__UpperCAmelCase =naive_cut_rod_recursive(A_ , A_ )
assert expected_max_revenue == max_rev_top_down
assert max_rev_top_down == max_rev_bottom_up
assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
| 68 |
import numpy as np
def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    """Algebraic form of tanh: (2 / (1 + e^(-2x))) - 1.

    >>> bool(np.allclose(tangent_hyperbolic(np.array([-1.0, 0.0, 1.0])), np.tanh([-1.0, 0.0, 1.0])))
    True
    """
    return (2 / (1 + np.exp(-2 * vector))) - 1
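# e.g. tangent_hyperbolic(np.array([-1.0, 0.0, 1.0])) ≈ [-0.76159416, 0.0, 0.76159416],
# matching np.tanh on the same input.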
if __name__ == "__main__":
import doctest
doctest.testmod()
| 36 | 0 |
'''simple docstring'''
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
def __init__( self : int , a_ : str="" , a_ : List[Any]="train" ):
"""simple docstring"""
assert os.path.isdir(a_ )
__snake_case = []
__snake_case = os.listdir(a_ )
for story_filename in story_filenames_list:
if "summary" in story_filename:
continue
__snake_case = os.path.join(a_ , a_ )
if not os.path.isfile(a_ ):
continue
self.documents.append(a_ )
def __len__( self : int ):
"""simple docstring"""
return len(self.documents )
def __getitem__( self : List[Any] , a_ : Tuple ):
"""simple docstring"""
__snake_case = self.documents[idx]
__snake_case = document_path.split("/" )[-1]
with open(a_ , encoding="utf-8" ) as source:
__snake_case = source.read()
__snake_case , __snake_case = process_story(a_ )
return document_name, story_lines, summary_lines
def __UpperCAmelCase ( _UpperCAmelCase : List[str] ) -> List[str]:
__snake_case = list(filter(lambda _UpperCAmelCase : len(_UpperCAmelCase ) != 0 , [line.strip() for line in raw_story.split("\n" )] ) )
# for some unknown reason some lines miss a period, add it
__snake_case = [_add_missing_period(_UpperCAmelCase ) for line in nonempty_lines]
# gather article lines
__snake_case = []
__snake_case = deque(_UpperCAmelCase )
while True:
try:
__snake_case = lines.popleft()
if element.startswith("@highlight" ):
break
story_lines.append(_UpperCAmelCase )
except IndexError:
# if "@highlight" is absent from the file we pop
# all elements until there is None, raising an exception.
return story_lines, []
# gather summary lines
__snake_case = list(filter(lambda _UpperCAmelCase : not t.startswith("@highlight" ) , _UpperCAmelCase ) )
return story_lines, summary_lines
def __UpperCAmelCase ( _UpperCAmelCase : Tuple ) -> Tuple:
__snake_case = [".", "!", "?", "...", "'", "`", "\"", "\u2019", "\u2019", ")"]
if line.startswith("@highlight" ):
return line
if line[-1] in END_TOKENS:
return line
return line + "."
def __UpperCAmelCase ( _UpperCAmelCase : Tuple , _UpperCAmelCase : Dict , _UpperCAmelCase : Dict ) -> Dict:
if len(_UpperCAmelCase ) > block_size:
return sequence[:block_size]
else:
sequence.extend([pad_token_id] * (block_size - len(_UpperCAmelCase )) )
return sequence
def __UpperCAmelCase ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Any ) -> Dict:
__snake_case = torch.ones_like(_UpperCAmelCase )
__snake_case = sequence == pad_token_id
__snake_case = 0
return mask
def __UpperCAmelCase ( _UpperCAmelCase : Any , _UpperCAmelCase : str , _UpperCAmelCase : int ) -> str:
__snake_case = [tokenizer.encode(_UpperCAmelCase ) for line in story_lines]
__snake_case = [token for sentence in story_lines_token_ids for token in sentence]
__snake_case = [tokenizer.encode(_UpperCAmelCase ) for line in summary_lines]
__snake_case = [token for sentence in summary_lines_token_ids for token in sentence]
return story_token_ids, summary_token_ids
def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] ) -> List[Any]:
__snake_case = []
for sequence in batch:
__snake_case = -1
__snake_case = []
for s in sequence:
if s == separator_token_id:
sentence_num += 1
embeddings.append(sentence_num % 2 )
batch_embeddings.append(_UpperCAmelCase )
return torch.tensor(_UpperCAmelCase )
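# --- illustrative pipeline (sketch; the helper names above are obfuscated) ---
# 1. the Dataset yields (document_name, story_lines, summary_lines) per CNN/DM story file
# 2. the encode helper tokenizes story and summary lines and flattens them to id lists
# 3. the fit-to-block-size helper pads/truncates each id list to a fixed block_size
# 4. the mask helper builds an attention mask that zeroes out pad positions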
| 69 |
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
__lowercase : Optional[int] = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['''memory_attention''', '''encoder_attn'''],
['''attention''', '''attn'''],
['''/''', '''.'''],
['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
['''.LayerNorm.beta''', '''_layer_norm.bias'''],
['''r.layer_''', '''r.layers.'''],
['''output_proj''', '''out_proj'''],
['''ffn.dense_1.''', '''fc2.'''],
['''ffn.dense.''', '''fc1.'''],
['''ffn_layer_norm''', '''final_layer_norm'''],
['''kernel''', '''weight'''],
['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
['''embeddings.weights''', '''shared.weight'''],
]
def lowercase ( __A : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
for pegasus_name, hf_name in PATTERNS:
snake_case : Dict = k.replace(__A , __A )
return k
def lowercase ( __A : dict , __A : dict ) -> PegasusForConditionalGeneration:
'''simple docstring'''
snake_case : Dict = DEFAULTS.copy()
cfg_kwargs.update(__A )
snake_case : int = PegasusConfig(**__A )
snake_case : List[Any] = PegasusForConditionalGeneration(__A )
snake_case : Optional[Any] = torch_model.model.state_dict()
snake_case : Optional[int] = {}
for k, v in tf_weights.items():
snake_case : str = rename_state_dict_key(__A )
if new_k not in sd:
raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" )
if "dense" in k or "proj" in new_k:
snake_case : Optional[Any] = v.T
snake_case : List[Any] = torch.tensor(__A , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, f"""{new_k}, {k}, {v.shape}, {sd[new_k].shape}"""
# make sure embedding.padding_idx is respected
snake_case : List[str] = torch.zeros_like(mapping["""shared.weight"""][cfg.pad_token_id + 1] )
snake_case : Optional[Any] = mapping["""shared.weight"""]
snake_case : Optional[Any] = mapping["""shared.weight"""]
snake_case : Tuple = {k: torch.zeros_like(__A ) for k, v in sd.items() if k.endswith("""bias""" ) and k not in mapping}
mapping.update(**__A )
snake_case , snake_case : Union[str, Any] = torch_model.model.load_state_dict(__A , strict=__A )
snake_case : Union[str, Any] = [
k for k in missing if k not in ["""encoder.embed_positions.weight""", """decoder.embed_positions.weight"""]
]
assert unexpected_missing == [], f"""no matches found for the following torch keys {unexpected_missing}"""
assert extra == [], f"""no matches found for the following tf keys {extra}"""
return torch_model
def lowercase ( __A : int="./ckpt/aeslc/model.ckpt-32000" ) -> Dict:
'''simple docstring'''
snake_case : Optional[Any] = tf.train.list_variables(__A )
snake_case : Union[str, Any] = {}
snake_case : List[str] = ["""Adafactor""", """global_step"""]
for name, shape in tqdm(__A , desc="""converting tf checkpoint to dict""" ):
snake_case : str = any(pat in name for pat in ignore_name )
if skip_key:
continue
snake_case : List[str] = tf.train.load_variable(__A , __A )
snake_case : Optional[Any] = array
return tf_weights
def lowercase ( __A : str , __A : str ) -> Optional[int]:
'''simple docstring'''
snake_case : Dict = Path(__A ).parent.name
snake_case : Dict = task_specific_params[f"""summarization_{dataset}"""]["""max_position_embeddings"""]
snake_case : Any = PegasusTokenizer.from_pretrained("""sshleifer/pegasus""" , model_max_length=__A )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(__A )
# convert model
snake_case : Dict = get_tf_weights_as_numpy(__A )
snake_case : List[Any] = task_specific_params[f"""summarization_{dataset}"""]
if dataset == "large":
snake_case : Optional[int] = task_specific_params
snake_case : Optional[int] = convert_pegasus(__A , __A )
torch_model.save_pretrained(__A )
snake_case : int = torch_model.state_dict()
sd.pop("""model.decoder.embed_positions.weight""" )
sd.pop("""model.encoder.embed_positions.weight""" )
torch.save(__A , Path(__A ) / """pytorch_model.bin""" )
if __name__ == "__main__":
__lowercase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
__lowercase : List[Any] = parser.parse_args()
if args.save_dir is None:
__lowercase : Optional[Any] = Path(args.tf_ckpt_path).parent.name
__lowercase : Union[str, Any] = os.path.join('''pegasus''', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 36 | 0 |
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = argparse.ArgumentParser()
parser.add_argument('--model_ckpt' , type=lowercase , default='microsoft/unixcoder-base-nine' )
parser.add_argument('--num_epochs' , type=lowercase , default=5 )
parser.add_argument('--batch_size' , type=lowercase , default=6 )
parser.add_argument('--gradient_accumulation_steps' , type=lowercase , default=1 )
parser.add_argument('--freeze' , type=lowercase , default=lowercase )
parser.add_argument('--learning_rate' , type=lowercase , default=5e-4 )
parser.add_argument('--seed' , type=lowercase , default=0 )
parser.add_argument('--lr_scheduler_type' , type=lowercase , default='cosine' )
parser.add_argument('--num_warmup_steps' , type=lowercase , default=10 )
parser.add_argument('--weight_decay' , type=lowercase , default=0.01 )
parser.add_argument('--output_dir' , type=lowercase , default='./results' )
return parser.parse_args()
lowerCamelCase : List[Any] = load("accuracy")
def _SCREAMING_SNAKE_CASE ( lowercase : Optional[Any] ):
'''simple docstring'''
lowerCamelCase_ , lowerCamelCase_ = eval_pred
lowerCamelCase_ = np.argmax(lowercase , axis=1 )
return metric.compute(predictions=lowercase , references=lowercase )
class A( UpperCamelCase ):
'''simple docstring'''
def __init__( self : Union[str, Any] , A_ : Optional[Any] ) -> None:
"""simple docstring"""
super().__init__()
lowerCamelCase_ = trainer
def a__ ( self : Any , A_ : Optional[Any] , A_ : Tuple , A_ : Union[str, Any] , **A_ : Tuple ) -> List[Any]:
"""simple docstring"""
if control.should_evaluate:
lowerCamelCase_ = deepcopy(A_ )
self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix='train' )
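            # evaluating on the training split here logs train_* metrics alongside the
            # regular eval_* metrics at every evaluation step.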
return control_copy
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = get_args()
set_seed(args.seed )
lowerCamelCase_ = load_dataset('codeparrot/codecomplex' , split='train' )
lowerCamelCase_ = dataset.train_test_split(test_size=0.2 )
lowerCamelCase_ = train_test['test'].train_test_split(test_size=0.5 )
lowerCamelCase_ = DatasetDict(
{
'train': train_test['train'],
'test': test_validation['train'],
'valid': test_validation['test'],
} )
print('Loading tokenizer and model' )
lowerCamelCase_ = AutoTokenizer.from_pretrained(args.model_ckpt )
lowerCamelCase_ = tokenizer.eos_token
lowerCamelCase_ = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 )
lowerCamelCase_ = model.config.eos_token_id
if args.freeze:
for param in model.roberta.parameters():
lowerCamelCase_ = False
lowerCamelCase_ = ClassLabel(num_classes=7 , names=list(set(train_test_validation['train']['complexity'] ) ) )
def tokenize(lowercase : Union[str, Any] ):
lowerCamelCase_ = tokenizer(example['src'] , truncation=lowercase , max_length=10_24 )
        lowerCamelCase_ = labels.str2int(example['complexity'] )
return {
"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"label": label,
}
lowerCamelCase_ = train_test_validation.map(
lowercase , batched=lowercase , remove_columns=train_test_validation['train'].column_names , )
lowerCamelCase_ = DataCollatorWithPadding(tokenizer=lowercase )
lowerCamelCase_ = TrainingArguments(
        output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type ,
        evaluation_strategy='epoch' , save_strategy='epoch' , logging_strategy='epoch' ,
        per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size ,
        num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps ,
        weight_decay=0.01 , metric_for_best_model='accuracy' , run_name='complexity-java' , report_to='wandb' , )
lowerCamelCase_ = Trainer(
model=lowercase , args=lowercase , train_dataset=tokenized_datasets['train'] , eval_dataset=tokenized_datasets['valid'] , tokenizer=lowercase , data_collator=lowercase , compute_metrics=lowercase , )
print('Training...' )
trainer.add_callback(CustomCallback(lowercase ) )
trainer.train()
if __name__ == "__main__":
main()
| 70 |
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class _A ( pl.LightningModule ):
'''simple docstring'''
def __init__( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
super().__init__()
snake_case : Dict = model
snake_case : Optional[int] = 2
snake_case : Optional[Any] = nn.Linear(self.model.config.hidden_size ,self.num_labels )
def snake_case_ ( self ):
'''simple docstring'''
pass
def lowercase ( __A : str , __A : str , __A : str ) -> Optional[Any]:
'''simple docstring'''
snake_case : Optional[Any] = LongformerModel.from_pretrained(__A )
snake_case : Tuple = LightningModel(__A )
snake_case : Optional[int] = torch.load(__A , map_location=torch.device("""cpu""" ) )
lightning_model.load_state_dict(ckpt["""state_dict"""] )
# init longformer question answering model
snake_case : Dict = LongformerForQuestionAnswering.from_pretrained(__A )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(__A )
print(f"""Conversion successful. Model saved under {pytorch_dump_folder_path}""" )
if __name__ == "__main__":
__lowercase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--longformer_model''',
default=None,
type=str,
required=True,
help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
)
parser.add_argument(
'''--longformer_question_answering_ckpt_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch Lightning Checkpoint.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__lowercase : List[str] = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 36 | 0 |
'''simple docstring'''
import argparse
import os
import re
_lowerCamelCase = """src/diffusers"""
# Pattern that looks at the indentation in a line.
_lowerCamelCase = re.compile(R"""^(\s*)\S""")
# Pattern that matches `"key":" and puts `key` in group 0.
_lowerCamelCase = re.compile(R"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_lowerCamelCase = re.compile(R"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
_lowerCamelCase = re.compile(R"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_lowerCamelCase = re.compile(R"""\[([^\]]+)\]""")
def a__ ( _SCREAMING_SNAKE_CASE : Tuple ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : int = _re_indent.search(_SCREAMING_SNAKE_CASE )
return "" if search is None else search.groups()[0]
def a__ ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : List[str]="" , _SCREAMING_SNAKE_CASE : Optional[Any]=None , _SCREAMING_SNAKE_CASE : Tuple=None ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : str = 0
UpperCAmelCase_ : List[Any] = code.split("\n" )
if start_prompt is not None:
while not lines[index].startswith(_SCREAMING_SNAKE_CASE ):
index += 1
UpperCAmelCase_ : Optional[Any] = ["\n".join(lines[:index] )]
else:
UpperCAmelCase_ : Optional[int] = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
UpperCAmelCase_ : Tuple = [lines[index]]
index += 1
while index < len(_SCREAMING_SNAKE_CASE ) and (end_prompt is None or not lines[index].startswith(_SCREAMING_SNAKE_CASE )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(_SCREAMING_SNAKE_CASE ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + " " ):
current_block.append(lines[index] )
blocks.append("\n".join(_SCREAMING_SNAKE_CASE ) )
if index < len(_SCREAMING_SNAKE_CASE ) - 1:
UpperCAmelCase_ : int = [lines[index + 1]]
index += 1
else:
UpperCAmelCase_ : Any = []
else:
blocks.append("\n".join(_SCREAMING_SNAKE_CASE ) )
UpperCAmelCase_ : str = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(_SCREAMING_SNAKE_CASE ) > 0:
blocks.append("\n".join(_SCREAMING_SNAKE_CASE ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(_SCREAMING_SNAKE_CASE ):
blocks.append("\n".join(lines[index:] ) )
return blocks
def a__ ( _SCREAMING_SNAKE_CASE : int ) -> int:
"""simple docstring"""
def _inner(_SCREAMING_SNAKE_CASE : List[Any] ):
return key(_SCREAMING_SNAKE_CASE ).lower().replace("_" , "" )
return _inner
def a__ ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : List[Any]=None ) -> Optional[Any]:
"""simple docstring"""
def noop(_SCREAMING_SNAKE_CASE : str ):
return x
if key is None:
UpperCAmelCase_ : Optional[Any] = noop
# Constants are all uppercase, they go first.
UpperCAmelCase_ : str = [obj for obj in objects if key(_SCREAMING_SNAKE_CASE ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
UpperCAmelCase_ : List[Any] = [obj for obj in objects if key(_SCREAMING_SNAKE_CASE )[0].isupper() and not key(_SCREAMING_SNAKE_CASE ).isupper()]
# Functions begin with a lowercase, they go last.
UpperCAmelCase_ : Any = [obj for obj in objects if not key(_SCREAMING_SNAKE_CASE )[0].isupper()]
UpperCAmelCase_ : Tuple = ignore_underscore(_SCREAMING_SNAKE_CASE )
return sorted(_SCREAMING_SNAKE_CASE , key=_SCREAMING_SNAKE_CASE ) + sorted(_SCREAMING_SNAKE_CASE , key=_SCREAMING_SNAKE_CASE ) + sorted(_SCREAMING_SNAKE_CASE , key=_SCREAMING_SNAKE_CASE )
def a__ ( _SCREAMING_SNAKE_CASE : int ) -> str:
"""simple docstring"""
def _replace(_SCREAMING_SNAKE_CASE : int ):
UpperCAmelCase_ : Dict = match.groups()[0]
if "," not in imports:
return F'''[{imports}]'''
UpperCAmelCase_ : Union[str, Any] = [part.strip().replace("\"" , "" ) for part in imports.split("," )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
UpperCAmelCase_ : Dict = keys[:-1]
return "[" + ", ".join([F'''"{k}"''' for k in sort_objects(_SCREAMING_SNAKE_CASE )] ) + "]"
UpperCAmelCase_ : str = import_statement.split("\n" )
if len(_SCREAMING_SNAKE_CASE ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
UpperCAmelCase_ : Tuple = 2 if lines[1].strip() == "[" else 1
UpperCAmelCase_ : Optional[int] = [(i, _re_strip_line.search(_SCREAMING_SNAKE_CASE ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
UpperCAmelCase_ : str = sort_objects(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x[1] )
UpperCAmelCase_ : Optional[int] = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(_SCREAMING_SNAKE_CASE ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
UpperCAmelCase_ : Optional[Any] = _re_bracket_content.sub(_replace , lines[1] )
else:
UpperCAmelCase_ : Tuple = [part.strip().replace("\"" , "" ) for part in lines[1].split("," )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
UpperCAmelCase_ : Union[str, Any] = keys[:-1]
UpperCAmelCase_ : Union[str, Any] = get_indent(lines[1] ) + ", ".join([F'''"{k}"''' for k in sort_objects(_SCREAMING_SNAKE_CASE )] )
return "\n".join(_SCREAMING_SNAKE_CASE )
else:
# Finally we have to deal with imports fitting on one line
UpperCAmelCase_ : int = _re_bracket_content.sub(_replace , _SCREAMING_SNAKE_CASE )
return import_statement
def a__ ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : List[Any]=True ) -> List[str]:
"""simple docstring"""
with open(_SCREAMING_SNAKE_CASE , "r" ) as f:
UpperCAmelCase_ : Union[str, Any] = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
UpperCAmelCase_ : Any = split_code_in_indented_blocks(
_SCREAMING_SNAKE_CASE , start_prompt="_import_structure = {" , end_prompt="if TYPE_CHECKING:" )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(_SCREAMING_SNAKE_CASE ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
UpperCAmelCase_ : Optional[Any] = main_blocks[block_idx]
UpperCAmelCase_ : List[Any] = block.split("\n" )
# Get to the start of the imports.
UpperCAmelCase_ : List[Any] = 0
while line_idx < len(_SCREAMING_SNAKE_CASE ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
UpperCAmelCase_ : List[str] = len(_SCREAMING_SNAKE_CASE )
else:
line_idx += 1
if line_idx >= len(_SCREAMING_SNAKE_CASE ):
continue
# Ignore beginning and last line: they don't contain anything.
UpperCAmelCase_ : Union[str, Any] = "\n".join(block_lines[line_idx:-1] )
UpperCAmelCase_ : Optional[Any] = get_indent(block_lines[1] )
# Slit the internal block into blocks of indent level 1.
UpperCAmelCase_ : Tuple = split_code_in_indented_blocks(_SCREAMING_SNAKE_CASE , indent_level=_SCREAMING_SNAKE_CASE )
# We have two categories of import key: list or _import_structure[key].append/extend
UpperCAmelCase_ : Optional[int] = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
UpperCAmelCase_ : int = [(pattern.search(_SCREAMING_SNAKE_CASE ).groups()[0] if pattern.search(_SCREAMING_SNAKE_CASE ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
UpperCAmelCase_ : Optional[int] = [(i, key) for i, key in enumerate(_SCREAMING_SNAKE_CASE ) if key is not None]
UpperCAmelCase_ : str = [x[0] for x in sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
UpperCAmelCase_ : Tuple = 0
UpperCAmelCase_ : Any = []
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
if keys[i] is None:
reordered_blocks.append(internal_blocks[i] )
else:
UpperCAmelCase_ : Union[str, Any] = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reordered_blocks.append(_SCREAMING_SNAKE_CASE )
count += 1
# And we put our main block back together with its first and last line.
UpperCAmelCase_ : Dict = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
if code != "\n".join(_SCREAMING_SNAKE_CASE ):
if check_only:
return True
else:
print(F'''Overwriting {file}.''' )
with open(_SCREAMING_SNAKE_CASE , "w" ) as f:
f.write("\n".join(_SCREAMING_SNAKE_CASE ) )
def a__ ( _SCREAMING_SNAKE_CASE : Union[str, Any]=True ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : Tuple = []
for root, _, files in os.walk(_SCREAMING_SNAKE_CASE ):
if "__init__.py" in files:
UpperCAmelCase_ : str = sort_imports(os.path.join(_SCREAMING_SNAKE_CASE , "__init__.py" ) , check_only=_SCREAMING_SNAKE_CASE )
if result:
UpperCAmelCase_ : List[Any] = [os.path.join(_SCREAMING_SNAKE_CASE , "__init__.py" )]
if len(_SCREAMING_SNAKE_CASE ) > 0:
raise ValueError(F'''Would overwrite {len(_SCREAMING_SNAKE_CASE )} files, run `make style`.''' )
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
_lowerCamelCase = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
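# typical invocation (file path assumed): `python utils/custom_init_isort.py --check_only`
# raises an error listing the __init__.py files whose import structure needs sorting.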
| 71 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
__lowercase : Optional[Any] = re.compile(r'''\b(a|an|the)\b''', re.UNICODE)
__lowercase : Optional[int] = None
def lowercase ( ) -> Optional[Any]:
'''simple docstring'''
snake_case : int = argparse.ArgumentParser("""Official evaluation script for SQuAD version 2.0.""" )
parser.add_argument("""data_file""" , metavar="""data.json""" , help="""Input data JSON file.""" )
parser.add_argument("""pred_file""" , metavar="""pred.json""" , help="""Model predictions.""" )
parser.add_argument(
"""--out-file""" , """-o""" , metavar="""eval.json""" , help="""Write accuracy metrics to file (default is stdout).""" )
parser.add_argument(
"""--na-prob-file""" , """-n""" , metavar="""na_prob.json""" , help="""Model estimates of probability of no answer.""" )
parser.add_argument(
"""--na-prob-thresh""" , """-t""" , type=__A , default=1.0 , help="""Predict \"\" if no-answer probability exceeds this (default = 1.0).""" , )
parser.add_argument(
"""--out-image-dir""" , """-p""" , metavar="""out_images""" , default=__A , help="""Save precision-recall curves to directory.""" )
parser.add_argument("""--verbose""" , """-v""" , action="""store_true""" )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def lowercase ( __A : Union[str, Any] ) -> int:
'''simple docstring'''
snake_case : Any = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
snake_case : int = bool(qa["""answers"""]["""text"""] )
return qid_to_has_ans
def lowercase ( __A : int ) -> Optional[int]:
'''simple docstring'''
def remove_articles(__A : List[Any] ):
return ARTICLES_REGEX.sub(""" """ , __A )
def white_space_fix(__A : Union[str, Any] ):
return " ".join(text.split() )
def remove_punc(__A : Tuple ):
snake_case : Optional[Any] = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(__A : Any ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(__A ) ) ) )
def lowercase ( __A : List[str] ) -> Union[str, Any]:
'''simple docstring'''
if not s:
return []
return normalize_answer(__A ).split()
def lowercase ( __A : Optional[int] , __A : int ) -> List[Any]:
'''simple docstring'''
return int(normalize_answer(__A ) == normalize_answer(__A ) )
def lowercase ( __A : Any , __A : Optional[Any] ) -> List[str]:
'''simple docstring'''
snake_case : Tuple = get_tokens(__A )
snake_case : str = get_tokens(__A )
snake_case : Dict = collections.Counter(__A ) & collections.Counter(__A )
snake_case : Optional[int] = sum(common.values() )
if len(__A ) == 0 or len(__A ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
snake_case : List[Any] = 1.0 * num_same / len(__A )
snake_case : int = 1.0 * num_same / len(__A )
snake_case : Dict = (2 * precision * recall) / (precision + recall)
return fa
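# worked example: gold "the cat sat" normalizes to ["cat", "sat"]; pred "cat sat down"
# to ["cat", "sat", "down"]; 2 shared tokens give P=2/3, R=1, F1=0.8.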
def lowercase ( __A : List[Any] , __A : int ) -> str:
'''simple docstring'''
snake_case : Tuple = {}
snake_case : Tuple = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
snake_case : str = qa["""id"""]
snake_case : Union[str, Any] = [t for t in qa["""answers"""]["""text"""] if normalize_answer(__A )]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
snake_case : Optional[Any] = [""""""]
if qid not in preds:
print(f"""Missing prediction for {qid}""" )
continue
snake_case : Dict = preds[qid]
# Take max over all gold answers
snake_case : Union[str, Any] = max(compute_exact(__A , __A ) for a in gold_answers )
snake_case : Optional[int] = max(compute_fa(__A , __A ) for a in gold_answers )
return exact_scores, fa_scores
def lowercase ( __A : str , __A : Any , __A : List[Any] , __A : List[Any] ) -> Dict:
'''simple docstring'''
snake_case : Optional[int] = {}
for qid, s in scores.items():
snake_case : Any = na_probs[qid] > na_prob_thresh
if pred_na:
snake_case : str = float(not qid_to_has_ans[qid] )
else:
snake_case : List[Any] = s
return new_scores
def lowercase ( __A : Dict , __A : Union[str, Any] , __A : List[str]=None ) -> int:
'''simple docstring'''
if not qid_list:
snake_case : List[str] = len(__A )
return collections.OrderedDict(
[
("""exact""", 100.0 * sum(exact_scores.values() ) / total),
("""f1""", 100.0 * sum(fa_scores.values() ) / total),
("""total""", total),
] )
else:
snake_case : Any = len(__A )
return collections.OrderedDict(
[
("""exact""", 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
("""f1""", 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
("""total""", total),
] )
def lowercase ( __A : Optional[Any] , __A : Tuple , __A : List[str] ) -> Optional[Any]:
'''simple docstring'''
for k in new_eval:
snake_case : str = new_eval[k]
def lowercase ( __A : Tuple , __A : int , __A : Dict , __A : Dict ) -> int:
'''simple docstring'''
plt.step(__A , __A , color="""b""" , alpha=0.2 , where="""post""" )
plt.fill_between(__A , __A , step="""post""" , alpha=0.2 , color="""b""" )
plt.xlabel("""Recall""" )
plt.ylabel("""Precision""" )
plt.xlim([0.0, 1.05] )
plt.ylim([0.0, 1.05] )
plt.title(__A )
plt.savefig(__A )
plt.clf()
def lowercase ( __A : Optional[Any] , __A : Union[str, Any] , __A : Dict , __A : Tuple , __A : Optional[Any]=None , __A : List[str]=None ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Optional[int] = sorted(__A , key=lambda __A : na_probs[k] )
snake_case : Any = 0.0
snake_case : str = 1.0
snake_case : Tuple = 0.0
snake_case : str = [1.0]
snake_case : Any = [0.0]
snake_case : Dict = 0.0
for i, qid in enumerate(__A ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
snake_case : str = true_pos / float(i + 1 )
snake_case : List[str] = true_pos / float(__A )
if i == len(__A ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(__A )
recalls.append(__A )
if out_image:
plot_pr_curve(__A , __A , __A , __A )
return {"ap": 100.0 * avg_prec}
def lowercase ( __A : Any , __A : Optional[int] , __A : Tuple , __A : Tuple , __A : List[Any] , __A : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
if out_image_dir and not os.path.exists(__A ):
os.makedirs(__A )
snake_case : Tuple = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
snake_case : str = make_precision_recall_eval(
__A , __A , __A , __A , out_image=os.path.join(__A , """pr_exact.png""" ) , title="""Precision-Recall curve for Exact Match score""" , )
snake_case : int = make_precision_recall_eval(
__A , __A , __A , __A , out_image=os.path.join(__A , """pr_f1.png""" ) , title="""Precision-Recall curve for F1 score""" , )
snake_case : Dict = {k: float(__A ) for k, v in qid_to_has_ans.items()}
snake_case : int = make_precision_recall_eval(
__A , __A , __A , __A , out_image=os.path.join(__A , """pr_oracle.png""" ) , title="""Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)""" , )
merge_eval(__A , __A , """pr_exact""" )
merge_eval(__A , __A , """pr_f1""" )
merge_eval(__A , __A , """pr_oracle""" )
def lowercase ( __A : List[Any] , __A : Union[str, Any] , __A : Union[str, Any] , __A : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
if not qid_list:
return
snake_case : int = [na_probs[k] for k in qid_list]
snake_case : List[str] = np.ones_like(__A ) / float(len(__A ) )
plt.hist(__A , weights=__A , bins=20 , range=(0.0, 1.0) )
plt.xlabel("""Model probability of no-answer""" )
plt.ylabel("""Proportion of dataset""" )
plt.title(f"""Histogram of no-answer probability: {name}""" )
plt.savefig(os.path.join(__A , f"""na_prob_hist_{name}.png""" ) )
plt.clf()
def lowercase ( __A : List[Any] , __A : Tuple , __A : Tuple , __A : Any ) -> Dict:
'''simple docstring'''
snake_case : Optional[int] = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
snake_case : str = num_no_ans
snake_case : Optional[Any] = cur_score
snake_case : Optional[Any] = 0.0
snake_case : List[Any] = sorted(__A , key=lambda __A : na_probs[k] )
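    # sweep thresholds in order of increasing no-answer probability: keeping an
    # answerable question gains its score, keeping a non-empty prediction for an
    # unanswerable one costs 1.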
for i, qid in enumerate(__A ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
snake_case : Dict = scores[qid]
else:
if preds[qid]:
snake_case : Dict = -1
else:
snake_case : str = 0
cur_score += diff
if cur_score > best_score:
snake_case : Union[str, Any] = cur_score
snake_case : List[Any] = na_probs[qid]
return 100.0 * best_score / len(__A ), best_thresh
def lowercase ( __A : Dict , __A : str , __A : str , __A : int , __A : str , __A : Any ) -> List[str]:
'''simple docstring'''
snake_case , snake_case : Optional[int] = find_best_thresh(__A , __A , __A , __A )
snake_case , snake_case : str = find_best_thresh(__A , __A , __A , __A )
snake_case : List[str] = best_exact
snake_case : List[Any] = exact_thresh
snake_case : Optional[Any] = best_fa
snake_case : Optional[int] = fa_thresh
def lowercase ( ) -> Any:
'''simple docstring'''
with open(OPTS.data_file ) as f:
snake_case : Dict = json.load(__A )
snake_case : Union[str, Any] = dataset_json["""data"""]
with open(OPTS.pred_file ) as f:
snake_case : int = json.load(__A )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
snake_case : Any = json.load(__A )
else:
snake_case : Any = {k: 0.0 for k in preds}
snake_case : Optional[int] = make_qid_to_has_ans(__A ) # maps qid to True/False
snake_case : Dict = [k for k, v in qid_to_has_ans.items() if v]
snake_case : Optional[int] = [k for k, v in qid_to_has_ans.items() if not v]
snake_case , snake_case : Optional[Any] = get_raw_scores(__A , __A )
snake_case : Tuple = apply_no_ans_threshold(__A , __A , __A , OPTS.na_prob_thresh )
snake_case : Optional[Any] = apply_no_ans_threshold(__A , __A , __A , OPTS.na_prob_thresh )
snake_case : Optional[int] = make_eval_dict(__A , __A )
if has_ans_qids:
snake_case : Any = make_eval_dict(__A , __A , qid_list=__A )
merge_eval(__A , __A , """HasAns""" )
if no_ans_qids:
snake_case : str = make_eval_dict(__A , __A , qid_list=__A )
merge_eval(__A , __A , """NoAns""" )
if OPTS.na_prob_file:
find_all_best_thresh(__A , __A , __A , __A , __A , __A )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(__A , __A , __A , __A , __A , OPTS.out_image_dir )
histogram_na_prob(__A , __A , OPTS.out_image_dir , """hasAns""" )
histogram_na_prob(__A , __A , OPTS.out_image_dir , """noAns""" )
if OPTS.out_file:
with open(OPTS.out_file , """w""" ) as f:
json.dump(__A , __A )
else:
print(json.dumps(__A , indent=2 ) )
if __name__ == "__main__":
__lowercase : Union[str, Any] = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('''Agg''')
import matplotlib.pyplot as plt
main()
| 36 | 0 |
'''simple docstring'''
def all_characters_unique(input_str: str) -> bool:
    """Return True if no character occurs twice, using a big-int as a bitset keyed by code point.

    >>> all_characters_unique("abc")
    True
    >>> all_characters_unique("aba")
    False
    """
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 72 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
__lowercase : Dict = logging.get_logger(__name__)
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : Optional[int] = ['''pixel_values''']
def __init__( self ,SCREAMING_SNAKE_CASE_ = True ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = PILImageResampling.BILINEAR ,SCREAMING_SNAKE_CASE_ = True ,SCREAMING_SNAKE_CASE_ = 1 / 255 ,SCREAMING_SNAKE_CASE_ = True ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = True ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE_ )
snake_case : List[Any] = size if size is not None else {"""shortest_edge""": 224}
snake_case : str = get_size_dict(SCREAMING_SNAKE_CASE_ ,default_to_square=SCREAMING_SNAKE_CASE_ )
snake_case : str = crop_size if crop_size is not None else {"""height""": 256, """width""": 256}
snake_case : Optional[Any] = get_size_dict(SCREAMING_SNAKE_CASE_ ,param_name="""crop_size""" )
snake_case : Optional[Any] = do_resize
snake_case : Union[str, Any] = size
snake_case : Dict = resample
snake_case : Dict = do_rescale
snake_case : Dict = rescale_factor
snake_case : List[str] = do_center_crop
snake_case : Dict = crop_size
snake_case : Any = do_flip_channel_order
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = PIL.Image.BILINEAR ,SCREAMING_SNAKE_CASE_ = None ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
        size = get_size_dict(size ,default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(F"""The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}""" )
        output_size = get_resize_output_image_size(image ,size=size["""shortest_edge"""] ,default_to_square=False )
        return resize(image ,size=output_size ,resample=resample ,data_format=data_format ,**kwargs )
    def center_crop( self ,image ,size ,data_format = None ,**kwargs ,):
'''simple docstring'''
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
        return center_crop(image ,size=(size["""height"""], size["""width"""]) ,data_format=data_format ,**kwargs )
    def rescale( self ,image ,scale ,data_format = None ,**kwargs ,):
'''simple docstring'''
        return rescale(image ,scale=scale ,data_format=data_format ,**kwargs )
    def flip_channel_order( self ,image ,data_format = None ):
'''simple docstring'''
        return flip_channel_order(image ,data_format=data_format )
    def preprocess( self ,images ,do_resize = None ,size = None ,resample = None ,do_rescale = None ,rescale_factor = None ,do_center_crop = None ,crop_size = None ,do_flip_channel_order = None ,return_tensors = None ,data_format = ChannelDimension.FIRST ,**kwargs ,):
'''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )
        size = size if size is not None else self.size
        size = get_size_dict(size ,default_to_square=False )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size ,param_name="""crop_size""" )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and size is None:
            raise ValueError("""Size must be specified if do_resize is True.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        if do_center_crop and crop_size is None:
            raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image ,size=size ,resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image ,size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image ,scale=rescale_factor ) for image in images]
        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image ) for image in images]
        images = [to_channel_dimension_format(image ,data_format ) for image in images]
        data = {"""pixel_values""": images}
        return BatchFeature(data=data ,tensor_type=return_tensors )
    def post_process_semantic_segmentation( self ,outputs ,target_sizes = None ):
'''simple docstring'''
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits ) != len(target_sizes ):
                raise ValueError(
                    """Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
            if is_torch_tensor(target_sizes ):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits ) ):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) ,size=target_sizes[idx] ,mode="""bilinear""" ,align_corners=False )
                semantic_map = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(semantic_map )
        else:
            semantic_segmentation = logits.argmax(dim=1 )
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
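# Hedged usage sketch (assumes a PIL/vision backend; the 256x256 center crop and the BGR
# channel flip come from the defaults set in __init__ above; values illustrative):
#   import PIL.Image
#   processor = MobileViTImageProcessor()
#   batch = processor.preprocess(images=PIL.Image.new("RGB", (480, 320)), return_tensors="pt")
#   batch["pixel_values"].shape  -> torch.Size([1, 3, 256, 256])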
| 36 | 0 |
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
a_ : str = 16
a_ : List[Any] = 32
def get_dataloaders(accelerator , batch_size = 16):
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    datasets = load_dataset('glue' , 'mrpc')
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=True , max_length=None)
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=['idx', 'sentence1', 'sentence2'] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label' , 'labels')
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding='longest' , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors='pt' , )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size)
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config , args):
    # For testing only
    if os.environ.get('TESTING_MOCKED_DATALOADERS' , None) == "1":
        config['num_epochs'] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    metric = evaluate.load('glue' , 'mrpc')
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
        set_seed(seed)
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=True)
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)
# Instantiate optimizer
        optimizer = AdamW(params=model.parameters() , lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size)
# Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model , optimizer , train_dataloader , eval_dataloader , lr_scheduler)
# Now we train the model
        for epoch in range(num_epochs):
model.train()
            for step, batch in enumerate(train_dataloader):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
            for step, batch in enumerate(eval_dataloader):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']))
                metric.add_batch(
                    predictions=predictions , references=references , )
            eval_metric = metric.compute()
# Use accelerator.print to print only on the main process.
            accelerator.print(F'''epoch {epoch}:''' , eval_metric)
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument(
        '--mixed_precision' , type=str , default=None , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.' , )
    parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.')
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config , args)
if __name__ == "__main__":
main()
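# Hedged launch sketch (the file name `memory.py` is an assumption; any of the launch modes
# listed in the header comment should work, since `find_executable_batch_size` retries
# `inner_training_loop` with a halved batch size whenever it hits an out-of-memory error):
#   accelerate launch memory.py --mixed_precision fp16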
| 73 |
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer
def save_randomly_initialized_version( config_name: str , save_dir: str , **config_kwargs ):
    '''simple docstring'''
    cfg = AutoConfig.from_pretrained(config_name , **config_kwargs )
    model = AutoModelForSeq2SeqLM.from_config(cfg )
    model.save_pretrained(save_dir )
    AutoTokenizer.from_pretrained(config_name ).save_pretrained(save_dir )
    return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
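# Hedged CLI sketch (fire exposes the function's positional and keyword arguments directly;
# the script name, checkpoint, and extra config kwarg below are illustrative):
#   python save_randomly_initialized_version.py t5-small /tmp/t5-random --d_model=64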
| 36 | 0 |
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version(""">=""", FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
logger = get_logger(__name__)
def save_fsdp_model( fsdp_plugin , accelerator , model , output_dir , model_index=0 ):
    """simple docstring"""
    os.makedirs(output_dir , exist_ok=True )
    with FSDP.state_dict_type(
        model , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin'''
            output_model_file = os.path.join(output_dir , weights_name )
            if accelerator.process_index == 0:
                logger.info(F'''Saving model to {output_model_file}''' )
                torch.save(state_dict , output_model_file )
                logger.info(F'''Model saved to {output_model_file}''' )
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                F'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
                if model_index == 0
                else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
            )
            output_model_file = os.path.join(output_dir , weights_name )
            logger.info(F'''Saving model to {output_model_file}''' )
            torch.save(state_dict , output_model_file )
            logger.info(F'''Model saved to {output_model_file}''' )
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir , F'''{MODEL_NAME}_{model_index}''' )
            os.makedirs(ckpt_dir , exist_ok=True )
            logger.info(F'''Saving model to {ckpt_dir}''' )
            state_dict = {'''model''': state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict , storage_writer=dist_cp.FileSystemWriter(ckpt_dir ) , planner=DefaultSavePlanner() , )
            logger.info(F'''Model saved to {ckpt_dir}''' )
def load_fsdp_model( fsdp_plugin , accelerator , model , input_dir , model_index=0 ):
    """simple docstring"""
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model ) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        '''Set the `sync_module_states` flag to `True` so that model states are synced across processes when '''
                        '''initializing FSDP object''' )
                return
            weights_name = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin'''
            input_model_file = os.path.join(input_dir , weights_name )
            logger.info(F'''Loading model from {input_model_file}''' )
            state_dict = torch.load(input_model_file )
            logger.info(F'''Model loaded from {input_model_file}''' )
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                F'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
                if model_index == 0
                else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
            )
            input_model_file = os.path.join(input_dir , weights_name )
            logger.info(F'''Loading model from {input_model_file}''' )
            state_dict = torch.load(input_model_file )
            logger.info(F'''Model loaded from {input_model_file}''' )
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir , F'''{MODEL_NAME}_{model_index}''' )
                if F'''{MODEL_NAME}''' not in input_dir
                else input_dir
            )
            logger.info(F'''Loading model from {ckpt_dir}''' )
            state_dict = {'''model''': model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict , storage_reader=dist_cp.FileSystemReader(ckpt_dir ) , planner=DefaultLoadPlanner() , )
            state_dict = state_dict['''model''']
            logger.info(F'''Model loaded from {ckpt_dir}''' )
        model.load_state_dict(state_dict )
def save_fsdp_optimizer( fsdp_plugin , accelerator , optimizer , model , output_dir , optimizer_index=0 ):
    """simple docstring"""
    os.makedirs(output_dir , exist_ok=True )
    with FSDP.state_dict_type(
        model , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
        optim_state = FSDP.optim_state_dict(model , optimizer )
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optimizer_name = (
                    F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
                )
                output_optimizer_file = os.path.join(output_dir , optimizer_name )
                logger.info(F'''Saving Optimizer state to {output_optimizer_file}''' )
                torch.save(optim_state , output_optimizer_file )
                logger.info(F'''Optimizer state saved in {output_optimizer_file}''' )
        else:
            ckpt_dir = os.path.join(output_dir , F'''{OPTIMIZER_NAME}_{optimizer_index}''' )
            os.makedirs(ckpt_dir , exist_ok=True )
            logger.info(F'''Saving Optimizer state to {ckpt_dir}''' )
            dist_cp.save_state_dict(
                state_dict={'''optimizer''': optim_state} , storage_writer=dist_cp.FileSystemWriter(ckpt_dir ) , planner=DefaultSavePlanner() , )
            logger.info(F'''Optimizer state saved in {ckpt_dir}''' )
def load_fsdp_optimizer( fsdp_plugin , accelerator , optimizer , model , input_dir , optimizer_index=0 ):
    """simple docstring"""
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly a pytorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
            )
            input_optimizer_file = os.path.join(input_dir , optimizer_name )
            logger.info(F'''Loading Optimizer state from {input_optimizer_file}''' )
            optim_state = torch.load(input_optimizer_file )
            logger.info(F'''Optimizer state loaded from {input_optimizer_file}''' )
        else:
            ckpt_dir = (
                os.path.join(input_dir , F'''{OPTIMIZER_NAME}_{optimizer_index}''' )
                if F'''{OPTIMIZER_NAME}''' not in input_dir
                else input_dir
            )
            logger.info(F'''Loading Optimizer from {ckpt_dir}''' )
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict() , optimizer_key='''optimizer''' , storage_reader=dist_cp.FileSystemReader(ckpt_dir ) , )
            optim_state = optim_state['''optimizer''']
            logger.info(F'''Optimizer loaded from {ckpt_dir}''' )
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state , model , optimizer )
        optimizer.load_state_dict(flattened_osd )
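# Hedged usage sketch (assumes a distributed run where `model` is FSDP-wrapped and the
# accelerator carries an FSDP plugin; save and load must use the same state_dict_type):
#   save_fsdp_model(fsdp_plugin, accelerator, model, "ckpt/")
#   load_fsdp_model(fsdp_plugin, accelerator, model, "ckpt/")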
| 74 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowercase : Any = logging.get_logger(__name__)
__lowercase : str = {
'''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''mobilenet_v1'''
    def __init__( self ,num_channels=3 ,image_size=224 ,depth_multiplier=1.0 ,min_depth=8 ,hidden_act="relu6" ,tf_padding=True ,classifier_dropout_prob=0.999 ,initializer_range=0.02 ,layer_norm_eps=0.001 ,**kwargs ,):
        '''simple docstring'''
        super().__init__(**kwargs )
        if depth_multiplier <= 0:
            raise ValueError("""depth_multiplier must be greater than zero.""" )
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class MobileNetV1OnnxConfig( OnnxConfig ):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse('''1.11''' )
@property
    def inputs( self ):
'''simple docstring'''
return OrderedDict([("""pixel_values""", {0: """batch"""})] )
@property
    def outputs( self ):
'''simple docstring'''
if self.task == "image-classification":
return OrderedDict([("""logits""", {0: """batch"""})] )
else:
return OrderedDict([("""last_hidden_state""", {0: """batch"""}), ("""pooler_output""", {0: """batch"""})] )
@property
    def atol_for_validation( self ):
'''simple docstring'''
return 1E-4
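# Hedged usage sketch (values illustrative; a multiplier <= 0 raises the ValueError above):
#   config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
#   config.depth_multiplier  -> 0.75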
| 36 | 0 |
'''simple docstring'''
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm( main_process_only: bool = True , *args , **kwargs ):
    if not is_tqdm_available():
        raise ImportError('''Accelerate\'s `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.''' )
    disable = False
    if main_process_only:
        disable = PartialState().local_process_index != 0
    return _tqdm(*args , **kwargs , disable=disable )
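# Hedged usage sketch (note that with this signature the positional `main_process_only`
# flag comes first, so the iterable is passed after it; names illustrative):
#   for item in tqdm(True, range(10), desc="steps"):
#       pass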
| 75 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase : List[str] = logging.get_logger(__name__)
__lowercase : List[str] = {
'''edbeeching/decision-transformer-gym-hopper-medium''': (
'''https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'''
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class DecisionTransformerConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''decision_transformer'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''max_position_embeddings''': '''n_positions''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }
    def __init__( self ,state_dim=17 ,act_dim=4 ,hidden_size=128 ,max_ep_len=4096 ,action_tanh=True ,vocab_size=1 ,n_positions=1024 ,n_layer=3 ,n_head=1 ,n_inner=None ,activation_function="relu" ,resid_pdrop=0.1 ,embd_pdrop=0.1 ,attn_pdrop=0.1 ,layer_norm_epsilon=1E-5 ,initializer_range=0.02 ,scale_attn_weights=True ,use_cache=True ,bos_token_id=50256 ,eos_token_id=50256 ,scale_attn_by_inverse_layer_idx=False ,reorder_and_upcast_attn=False ,**kwargs ,):
        '''simple docstring'''
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,**kwargs )
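# Hedged usage sketch (state_dim/act_dim below match a Gym Hopper-style setup; illustrative):
#   config = DecisionTransformerConfig(state_dim=11, act_dim=3, max_ep_len=1000)
#   config.num_attention_heads  -> 1, resolved through attribute_map to n_head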
| 36 | 0 |
"""simple docstring"""
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = '\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n'
_DESCRIPTION = '\\nBLEURT is a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project\'s README at https://github.com/google-research/bleurt#readme for more information.\n'
_KWARGS_DESCRIPTION = '\nBLEURT score.\n\nArgs:\n    `predictions` (list of str): prediction/candidate sentences\n    `references` (list of str): reference sentences\n    `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n    \'scores\': List of scores.\nExamples:\n\n    >>> predictions = ["hello there", "general kenobi"]\n    >>> references = ["hello there", "general kenobi"]\n    >>> bleurt = datasets.load_metric("bleurt")\n    >>> results = bleurt.compute(predictions=predictions, references=references)\n    >>> print([round(v, 2) for v in results["scores"]])\n    [1.03, 1.04]\n'
CHECKPOINT_URLS = {
'bleurt-tiny-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip',
'bleurt-tiny-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip',
'bleurt-base-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip',
'bleurt-base-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip',
'bleurt-large-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip',
'bleurt-large-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip',
'BLEURT-20-D3': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip',
'BLEURT-20-D6': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip',
'BLEURT-20-D12': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip',
'BLEURT-20': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip',
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/google-research/bleurt''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/google-research/bleurt'''] , reference_urls=['''https://github.com/google-research/bleurt''', '''https://arxiv.org/abs/2004.04696'''] , )
    def _download_and_prepare( self , dl_manager ):
# check that config name specifies a valid BLEURT model
if self.config_name == "default":
logger.warning(
'''Using default BLEURT-Base checkpoint for sequence maximum length 128. '''
'''You can use a bigger model for better results with e.g.: datasets.load_metric(\'bleurt\', \'bleurt-large-512\').''' )
            checkpoint_name = '''bleurt-base-128'''
        elif self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                F"""{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}""" )
        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] )
        self.scorer = score.BleurtScorer(os.path.join(model_path , checkpoint_name ) )
    def _compute( self , predictions , references ):
        scores = self.scorer.score(references=references , candidates=predictions )
        return {"scores": scores}
| 76 |
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
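# Hedged usage sketch (assumes this module is diffusers' schedulers package; the guarded
# imports above mean availability depends on the installed backends):
#   from diffusers.schedulers import DDIMScheduler
#   scheduler = DDIMScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(50)  # 50 inference steps sampled from the 1000 training steps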
| 36 | 0 |
"""simple docstring"""
print((lambda quine: quine % quine)("""print((lambda quine: quine %% quine)(%r))"""))
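# How it works (hedged explanation): the lambda receives the template string as `quine`;
# `quine % quine` substitutes the string into its own `%r` slot (in repr form) while `%%`
# collapses to a literal `%`, so the printed text is again a program that prints itself.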
| 77 |
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card( model_card_dir , src_lang , tgt_lang ):
'''simple docstring'''
    texts = {
"""en""": """Machine learning is great, isn't it?""",
"""ru""": """Машинное обучение - это здорово, не так ли?""",
"""de""": """Maschinelles Lernen ist großartig, oder?""",
}
    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
"""ru-en""": ["""[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)""", """39.20"""],
"""en-ru""": ["""[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)""", """33.47"""],
"""en-de""": ["""[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)""", """42.83"""],
"""de-en""": ["""[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)""", """41.35"""],
}
    pair = f"""{src_lang}-{tgt_lang}"""
    readme = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn't support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR's WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
"""
    os.makedirs(model_card_dir , exist_ok=True )
    path = os.path.join(model_card_dir , """README.md""" )
    print(f"""Generating {path}""" )
    with open(path , """w""" , encoding="""utf-8""" ) as f:
        f.write(readme )
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / '''model_cards'''
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split('''-''')
    model_card_dir = model_cards_dir / '''facebook''' / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
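# Hedged run sketch (per the "Usage" header above and the Path(...).parent.parent.parent
# arithmetic, the script is expected to live two directories below the repo root; the exact
# path is an assumption):
#   python scripts/fsmt/gen-card-facebook-wmt19.py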
| 36 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_: str =logging.get_logger(__name__)
def create_rename_keys( config , base_model=False ):
'''simple docstring'''
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(("cls_token", "vit.embeddings.cls_token") )
rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings") )
rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias") )
# backbone
rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias""") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
# fmt: on
return rename_keys
def read_in_q_k_v( state_dict , config , base_model=False ):
'''simple docstring'''
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_( state_dict ):
'''simple docstring'''
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( dct , old , new ):
'''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
def prepare_img():
'''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint( vit_name , pytorch_dump_folder_path , push_to_hub=False ):
'''simple docstring'''
    backbone_config = BitConfig(
        global_padding="same" , layer_type="bottleneck" , depths=(3, 4, 9) , out_features=["stage3"] , embedding_dynamic_padding=True , )
    config = ViTHybridConfig(backbone_config=backbone_config , image_size=3_84 , num_labels=10_00 )
    base_model = False
    # load original model from timm
    timm_model = timm.create_model(vit_name , pretrained=True )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict )
    rename_keys = create_rename_keys(config , base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config ).eval()
    else:
        model = ViTHybridForImageClassification(config ).eval()
    model.load_state_dict(state_dict )
    # create image processor
    transform = create_transform(**resolve_data_config({} , model=timm_model ) )
    timm_transforms = transform.transforms
    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    processor = ViTHybridImageProcessor(
        do_resize=True , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=True , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=True , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
    image = prepare_img()
    timm_pixel_values = transform(image ).unsqueeze(0 )
    pixel_values = processor(image , return_tensors="pt" ).pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values , pixel_values )
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values )
    logits = outputs.logits
    print("Predicted class:" , logits.argmax(-1 ).item() )
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values )
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output , outputs.pooler_output , atol=1E-3 )
    else:
        timm_logits = timm_model(pixel_values )
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits , outputs.logits , atol=1E-3 )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(f"""Saving model {vit_name} to {pytorch_dump_folder_path}""" )
        model.save_pretrained(pytorch_dump_folder_path )
        print(f"""Saving processor to {pytorch_dump_folder_path}""" )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(f"""Pushing model and processor to the hub {vit_name}""" )
        model.push_to_hub(f"""ybelkada/{vit_name}""" )
        processor.push_to_hub(f"""ybelkada/{vit_name}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_r50_s16_384',
type=str,
help='Name of the hybrid ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
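# Hedged run sketch (the checkpoint name matches the --vit_name default above; the script
# file name and output path are illustrative):
#   python convert_vit_hybrid_timm_to_pytorch.py \
#       --vit_name vit_base_r50_s16_384 --pytorch_dump_folder_path ./vit-hybrid --push_to_hub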
| 78 |
__lowercase : List[str] = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
__lowercase : str = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
__lowercase : List[str] = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 36 | 0 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class UpperCAmelCase_ ( ABC ):
    @staticmethod
    @abstractmethod
    def register_subcommand( parser: ArgumentParser ):
        raise NotImplementedError()
    @abstractmethod
    def run( self ):
        raise NotImplementedError()
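# Hedged subclass sketch (in transformers-style CLIs, the `parser` handed to
# register_subcommand is typically the sub-parsers action; names below are illustrative):
#   class EnvCommand(UpperCAmelCase_):
#       @staticmethod
#       def register_subcommand(parser):
#           env_parser = parser.add_parser("env")
#           env_parser.set_defaults(func=lambda args: EnvCommand())
#       def run(self):
#           print("environment info")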
| 79 |
import warnings
from ..trainer import Trainer
from ..utils import logging
__lowercase : str = logging.get_logger(__name__)
class _A ( snake_case ):
'''simple docstring'''
    def __init__( self ,args=None ,**kwargs ):
        '''simple docstring'''
        warnings.warn(
            """`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` """
            """instead.""" ,FutureWarning ,)
        super().__init__(args=args ,**kwargs )
| 36 | 0 |
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset():
    '''simple docstring'''
    arr = [randint(-1_000 , 1_000 ) for i in range(10 )]
    r = randint(-5_000 , 5_000 )
    return (arr, r)
dataset = make_dataset()
def triplet_sum1( arr , target ):
    '''simple docstring'''
    for triplet in permutations(arr , 3 ):
        if sum(triplet ) == target:
            return tuple(sorted(triplet ) )
    return (0, 0, 0)
def triplet_sum2( arr , target ):
    '''simple docstring'''
    arr.sort()
    n = len(arr )
    for i in range(n - 1 ):
        left , right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)
def solution_times():
    '''simple docstring'''
    setup_code = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
    test_code1 = """
triplet_sum1(*dataset)
"""
    test_code2 = """
triplet_sum2(*dataset)
"""
    times1 = repeat(setup=setup_code , stmt=test_code1 , repeat=5 , number=10_000 )
    times2 = repeat(setup=setup_code , stmt=test_code2 , repeat=5 , number=10_000 )
    return (min(times1 ), min(times2 ))
if __name__ == "__main__":
from doctest import testmod
testmod()
    times = solution_times()
print(F'''The time for naive implementation is {times[0]}.''')
print(F'''The time for optimized implementation is {times[1]}.''')
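# Complexity note (hedged): triplet_sum1 enumerates all 3-permutations, O(n^3) with large
# constants; triplet_sum2 is the classic sort + two-pointer scan, O(n^2), which is why its
# measured time above is consistently lower.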
| 80 |
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ['''text''', '''image''', '''audio''']
def create_inputs( input_types ):
    '''simple docstring'''
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append("""Text input""" )
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png""" ).resize((512, 512) ) )
        elif input_type == "audio":
            inputs.append(torch.ones(3000 ) )
        elif isinstance(input_type , list ):
            inputs.append(create_inputs(input_type ) )
        else:
            raise ValueError(f"""Invalid type requested: {input_type}""" )
    return inputs
def output_types( outputs ):
    '''simple docstring'''
    output_types = []
    for output in outputs:
        if isinstance(output , (str, AgentText) ):
            output_types.append("""text""" )
        elif isinstance(output , (Image.Image, AgentImage) ):
            output_types.append("""image""" )
        elif isinstance(output , (torch.Tensor, AgentAudio) ):
            output_types.append("""audio""" )
        else:
            raise ValueError(f"""Invalid output: {output}""" )
    return output_types
@is_tool_test
class _A :
'''simple docstring'''
    def test_inputs_outputs( self ):
        '''simple docstring'''
        self.assertTrue(hasattr(self.tool ,"""inputs""" ) )
        self.assertTrue(hasattr(self.tool ,"""outputs""" ) )
        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input ,list ):
                for __input in _input:
                    self.assertTrue(__input in authorized_types )
            else:
                self.assertTrue(_input in authorized_types )
        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types )
    def test_call( self ):
        '''simple docstring'''
        inputs = create_inputs(self.tool.inputs )
        outputs = self.tool(*inputs )
        # There is a single output
        if len(self.tool.outputs ) == 1:
            outputs = [outputs]
        self.assertListEqual(output_types(outputs ) ,self.tool.outputs )
    def test_common_attributes( self ):
        '''simple docstring'''
        self.assertTrue(hasattr(self.tool ,"""description""" ) )
        self.assertTrue(hasattr(self.tool ,"""default_checkpoint""" ) )
        self.assertTrue(self.tool.description.startswith("""This is a tool that""" ) )
    def test_agent_types_outputs( self ):
        '''simple docstring'''
        inputs = create_inputs(self.tool.inputs )
        outputs = self.tool(*inputs )
        if not isinstance(outputs ,list ):
            outputs = [outputs]
        self.assertEqual(len(outputs ) ,len(self.tool.outputs ) )
        for output, output_type in zip(outputs ,self.tool.outputs ):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output ,agent_type ) )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Tuple = create_inputs(self.tool.inputs )
snake_case : Any = []
for _input, input_type in zip(SCREAMING_SNAKE_CASE_ ,self.tool.inputs ):
if isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
snake_case : Tuple = self.tool(*SCREAMING_SNAKE_CASE_ )
if not isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
snake_case : Union[str, Any] = [outputs]
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) ,len(self.tool.outputs ) )
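# Consumption sketch for the mixin above (hedged; "ExampleTool" is a hypothetical
# Tool subclass, not one shipped with the library): a concrete test case provides
# `self.tool` in setUp and inherits every test_* method from ToolTesterMixin.
#
# import unittest
#
# class ExampleToolTester(ToolTesterMixin, unittest.TestCase):
#     def setUp(self):
#         self.tool = ExampleTool()  # hypothetical Tool subclass
#         self.tool.setup()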
| 36 | 0 |
def solution(n: int = 1000) -> int:
    """Return the index of the first term in the Fibonacci sequence to contain n digits."""
    fa, fb = 1, 1
    index = 2
    while True:
        f = fa + fb
        fa, fb = fb, f
        index += 1
        # the digit count grows by at most one per term, so equality is always reached
        if len(str(f)) == n:
            break
    return index
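# Quick sanity check: with F(1) = F(2) = 1, the first Fibonacci term with three
# digits is F(12) = 144, so solution(3) must return 12.
assert solution(3) == 12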
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 81 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize("""path""" , ["""paws""", """csv"""] )
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings("""ignore:inspect_metric is deprecated:FutureWarning""" )
@pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" )
@pytest.mark.parametrize("""path""" , ["""accuracy"""] )
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
"""path, config_name, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
"""path, expected""" , [
("""squad""", """plain_text"""),
("""acronym_identification""", """default"""),
("""lhoestq/squad""", """plain_text"""),
("""lhoestq/test""", """default"""),
("""lhoestq/demo1""", """lhoestq--demo1"""),
("""dalle-mini/wit""", """dalle-mini--wit"""),
] , )
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names
@pytest.mark.parametrize(
"""path, expected_configs, expected_splits_in_first_config""" , [
("""squad""", ["""plain_text"""], ["""train""", """validation"""]),
("""dalle-mini/wit""", ["""dalle-mini--wit"""], ["""train"""]),
("""paws""", ["""labeled_final""", """labeled_swap""", """unlabeled_final"""], ["""train""", """test""", """validation"""]),
] , )
def test_get_dataset_infos(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
"""path, expected_config, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def test_get_dataset_info(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
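# Usage sketch for the inspection APIs exercised above (network access to the
# Hugging Face Hub is assumed; the expected values mirror the test fixtures):
#
# from datasets import get_dataset_config_names, get_dataset_split_names
#
# assert "plain_text" in get_dataset_config_names("squad")
# assert get_dataset_split_names("squad", config_name="plain_text") == ["train", "validation"]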
| 36 | 0 |
"""simple docstring"""
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)

OPTS = None
def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()


def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores


def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )


def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]


def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()


def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}


def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")


def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh


def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh


def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))


if __name__ == "__main__":
    OPTS = parse_args()
    if OPTS.out_image_dir:
        import matplotlib

        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
    main()
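# Worked example of the two SQuAD metrics defined above. normalize_answer() lowers,
# strips punctuation and articles, and collapses whitespace, so "The cat sat." and
# "cat sat" are an exact match, while "a cat" normalizes to the single token "cat"
# and scores precision 1.0 and recall 0.5 against it, giving F1 = 2/3:
#
#   compute_exact("The cat sat.", "cat sat")  -> 1
#   compute_f1("The cat sat.", "a cat")       -> 0.666...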
| 82 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
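# Minimal usage sketch (the tiny sizes are illustrative; the printed axes assume
# the default "default" ONNX task):
#
# config = AlbertConfig(vocab_size=128, hidden_size=64, num_attention_heads=4)
# onnx_config = AlbertOnnxConfig(config)
# print(list(onnx_config.inputs.keys()))  # ['input_ids', 'attention_mask', 'token_type_ids']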
| 36 | 0 |
"""simple docstring"""
import heapq
def greedy_min_vertex_cover(graph: dict) -> set:
    """Greedy APPROX-VERTEX-COVER: repeatedly pick the highest-degree vertex."""
    queue: list[list] = []

    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(f"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}")
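    # The -1 * len(v) push above turns Python's min-heap into a max-heap keyed on
    # vertex degree; a two-line demonstration of that trick:
    q: list = []
    for key, neighbors in {"a": [1, 2, 3], "b": [1]}.items():
        heapq.heappush(q, [-len(neighbors), key])
    print(heapq.heappop(q))  # [-3, 'a'] -- the highest-degree vertex comes out first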
| 83 |
from __future__ import annotations
def mean(nums: list) -> float:
    """Return the arithmetic mean of the values in nums; raise on an empty list."""
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
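    # quick usage check: (3 + 6 + 9) / 3 == 6.0
    assert mean([3, 6, 9]) == 6.0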
| 36 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
    AltDiffusionImg2ImgPipeline,
    AutoencoderKL,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5006,
        )
        return RobertaSeriesModelWithTransformation(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_stable_diffusion_img2img_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(torch_device)

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = alt_pipe(
            [prompt],
            generator=generator,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        ).images

        assert image.shape == (1, 32, 32, 3)
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy"
        )

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1e-2
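# A hedged end-to-end sketch of the pipeline exercised above (the checkpoint is the
# "BAAI/AltDiffusion" one used by the slow tests; a CUDA device is assumed):
#
# import torch
# from diffusers import AltDiffusionImg2ImgPipeline
# from diffusers.utils import load_image
#
# pipe = AltDiffusionImg2ImgPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
# pipe = pipe.to("cuda")
# init_image = load_image(
#     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
#     "/img2img/sketch-mountains-input.jpg"
# ).resize((768, 512))
# image = pipe(
#     prompt="A fantasy landscape, trending on artstation", image=init_image, strength=0.75
# ).images[0]
# image.save("fantasy_landscape.png")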
| 84 |
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
__lowercase : List[str] = logging.get_logger(__name__)
class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuraton of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"
            )

        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ):
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})


class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        import torch

        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)
        return common_inputs


class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> None:
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(
        self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default"
    ) -> OnnxConfig:
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
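# Composition sketch for the config class above (the ViT encoder / GPT-2 decoder
# pairing is an illustrative choice, not mandated by the class):
#
# from transformers import AutoConfig
#
# encoder_config = AutoConfig.for_model("vit")
# decoder_config = AutoConfig.for_model("gpt2")
# config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)
# assert config.decoder.is_decoder and config.decoder.add_cross_attention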
| 36 | 0 |
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word mask in Chinese."},
    )
    eval_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."},
    )
    line_by_line: bool = field(
        default=False,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether ot not to use whole word mask."})
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    block_size: int = field(
        default=-1,
        metadata={
            "help": (
                "Optional input sequence length after tokenization."
                "The training dataset will be truncated in block of this size for training."
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("You need to set world whole masking and mlm to True for Chinese Whole Word Mask")
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer,
                    file_path=file_path,
                    block_size=args.block_size,
                    ref_path=ref_path,
                )
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer,
                file_path=file_path,
                block_size=args.block_size,
                overwrite_cache=args.overwrite_cache,
                cache_dir=cache_dir,
            )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
'Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '
'or remove the --do_eval argument.' )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , lowercase__ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.' )
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'
' script, save it,and load it from here, using --tokenizer_name' )
    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelWithLMHead.from_config(config)

    model.resize_token_embeddings(len(tokenizer))
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
'BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'
'--mlm flag (masked language modeling).' )
    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)
# Get datasets
    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer,
            plm_probability=data_args.plm_probability,
            max_span_length=data_args.max_span_length,
        )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer, mlm_probability=data_args.mlm_probability
            )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability
            )
# Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        prediction_loss_only=True,
    )
# Training
if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        result = {"perplexity": perplexity}

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key in sorted(result.keys()):
                    logger.info("  %s = %s", key, str(result[key]))
                    writer.write("%s = %s\n" % (key, str(result[key])))

        results.update(result)
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
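# Illustrative command line (flag names follow the dataclass fields parsed by
# HfArgumentParser above; all paths are placeholders):
#
# python run_language_modeling.py \
#     --model_name_or_path gpt2 \
#     --train_data_file /path/to/train.txt \
#     --eval_data_file /path/to/eval.txt \
#     --do_train --do_eval \
#     --output_dir /tmp/lm-output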
| 85 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase : Any = logging.get_logger(__name__)
def get_config(model_name):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = "std_conv" if "bit" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer,
        num_labels=1000,
        id2label=id2label,
        label2id=label2id,
    )
    return config
def rename_key(name):
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name
    return name
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model and rename keys to the HF layout
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")
if __name__ == "__main__":
__lowercase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''resnetv2_50x1_bitm''',
type=str,
help='''Name of the BiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model to the hub.''',
)
__lowercase : Union[str, Any] = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
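# Illustrative invocation (the script file name is hypothetical; the flags are the
# ones registered with argparse above):
#
# python convert_bit_to_pytorch.py \
#     --model_name resnetv2_50x1_bitm \
#     --pytorch_dump_folder_path ./bit-dump \
#     --push_to_hub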
| 36 | 0 |
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        # extra 5-way classification head over the pooled output (answer category)
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)


class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels):
    def cross_entropy(logits, labels, reduction=None):
        vocab_size = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(vocab_size)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooler_labels)
    return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class _a :
"""simple docstring"""
_lowerCamelCase : str = "google/bigbird-roberta-base"
_lowerCamelCase : int = 3_0_0_0
_lowerCamelCase : int = 1_0_5_0_0
_lowerCamelCase : int = 1_2_8
_lowerCamelCase : int = 3
_lowerCamelCase : int = 1
_lowerCamelCase : int = 5
# tx_args
_lowerCamelCase : float = 3e-5
_lowerCamelCase : float = 0.0
_lowerCamelCase : int = 2_0_0_0_0
_lowerCamelCase : float = 0.0_0_9_5
_lowerCamelCase : str = "bigbird-roberta-natural-questions"
_lowerCamelCase : str = "training-expt"
_lowerCamelCase : str = "data/nq-training.jsonl"
_lowerCamelCase : str = "data/nq-validation.jsonl"
def __A ( self : Optional[int] ):
os.makedirs(self.base_dir , exist_ok=UpperCAmelCase )
A_ = os.path.join(self.base_dir , self.save_dir )
A_ = self.batch_size_per_device * jax.device_count()
@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4096  # no dynamic padding on TPUs

    def __call__(self, batch):
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch

    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features["input_ids"])
        batch = {
            "input_ids": jnp.array(input_ids, dtype=jnp.int32),
            "attention_mask": jnp.array(attention_mask, dtype=jnp.int32),
            "start_labels": jnp.array(features["start_token"], dtype=jnp.int32),
            "end_labels": jnp.array(features["end_token"], dtype=jnp.int32),
            "pooled_labels": jnp.array(features["category"], dtype=jnp.int32),
        }
        return batch

    def fetch_inputs(self, input_ids: list):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)

    def _fetch_inputs(self, input_ids: list):
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask
def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)
@partial(jax.pmap, axis_name="batch")
def train_step(state, drp_rng, **model_inputs):
    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")

        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs

        return state.loss_fn(
            start_logits,
            start_labels,
            end_logits,
            end_labels,
            pooled_logits,
            pooled_labels,
        )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")

    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng


@partial(jax.pmap, axis_name="batch")
def val_step(state, **model_inputs):
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")

    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs

    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics
class _a ( train_state.TrainState ):
"""simple docstring"""
_lowerCamelCase : Callable = struct.field(pytree_node=snake_case_ )
@dataclass
class _a :
"""simple docstring"""
_lowerCamelCase : Args
_lowerCamelCase : Callable
_lowerCamelCase : Callable
_lowerCamelCase : Callable
_lowerCamelCase : Callable
_lowerCamelCase : wandb
_lowerCamelCase : Callable = None
def __A ( self : Any , UpperCAmelCase : int , UpperCAmelCase : str , UpperCAmelCase : Any , UpperCAmelCase : Any=None ):
A_ = model.params
A_ = TrainState.create(
apply_fn=model.__call__ , params=UpperCAmelCase , tx=UpperCAmelCase , loss_fn=UpperCAmelCase , )
if ckpt_dir is not None:
A_ , A_ , A_ , A_ , A_ = restore_checkpoint(UpperCAmelCase , UpperCAmelCase )
A_ = {
"lr": args.lr,
"init_lr": args.init_lr,
"warmup_steps": args.warmup_steps,
"num_train_steps": num_train_steps,
"weight_decay": args.weight_decay,
}
A_ , A_ = build_tx(**UpperCAmelCase )
A_ = train_state.TrainState(
step=UpperCAmelCase , apply_fn=model.__call__ , params=UpperCAmelCase , tx=UpperCAmelCase , opt_state=UpperCAmelCase , )
A_ = args
A_ = data_collator
A_ = lr
A_ = params
A_ = jax_utils.replicate(UpperCAmelCase )
return state
def __A ( self : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[Any] ):
A_ = self.args
A_ = len(UpperCAmelCase ) // args.batch_size
A_ = jax.random.PRNGKey(0 )
A_ = jax.random.split(UpperCAmelCase , jax.device_count() )
for epoch in range(args.max_epochs ):
A_ = jnp.array(0 , dtype=jnp.floataa )
A_ = get_batched_dataset(UpperCAmelCase , args.batch_size , seed=UpperCAmelCase )
A_ = 0
for batch in tqdm(UpperCAmelCase , total=UpperCAmelCase , desc=f'''Running EPOCH-{epoch}''' ):
A_ = self.data_collator(UpperCAmelCase )
A_ , A_ , A_ = self.train_step_fn(UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
running_loss += jax_utils.unreplicate(metrics["loss"] )
i += 1
if i % args.logging_steps == 0:
A_ = jax_utils.unreplicate(state.step )
A_ = running_loss.item() / i
A_ = self.scheduler_fn(state_step - 1 )
A_ = self.evaluate(UpperCAmelCase , UpperCAmelCase )
A_ = {
"step": state_step.item(),
"eval_loss": eval_loss.item(),
"tr_loss": tr_loss,
"lr": lr.item(),
}
tqdm.write(str(UpperCAmelCase ) )
self.logger.log(UpperCAmelCase , commit=UpperCAmelCase )
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + f'''-e{epoch}-s{i}''' , state=UpperCAmelCase )
def __A ( self : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[Any] ):
A_ = get_batched_dataset(UpperCAmelCase , self.args.batch_size )
A_ = len(UpperCAmelCase ) // self.args.batch_size
A_ = jnp.array(0 , dtype=jnp.floataa )
A_ = 0
for batch in tqdm(UpperCAmelCase , total=UpperCAmelCase , desc="Evaluating ... " ):
A_ = self.data_collator(UpperCAmelCase )
A_ = self.val_step_fn(UpperCAmelCase , **UpperCAmelCase )
running_loss += jax_utils.unreplicate(metrics["loss"] )
i += 1
return running_loss / i
def __A ( self : Tuple , UpperCAmelCase : str , UpperCAmelCase : int ):
A_ = jax_utils.unreplicate(UpperCAmelCase )
print(f'''SAVING CHECKPOINT IN {save_dir}''' , end=" ... " )
self.model_save_fn(UpperCAmelCase , params=state.params )
with open(os.path.join(UpperCAmelCase , "opt_state.msgpack" ) , "wb" ) as f:
f.write(to_bytes(state.opt_state ) )
joblib.dump(self.args , os.path.join(UpperCAmelCase , "args.joblib" ) )
joblib.dump(self.data_collator , os.path.join(UpperCAmelCase , "data_collator.joblib" ) )
with open(os.path.join(UpperCAmelCase , "training_state.json" ) , "w" ) as f:
json.dump({"step": state.step.item()} , UpperCAmelCase )
print("DONE" )
def __snake_case ( __UpperCamelCase : int ,__UpperCamelCase : Any ):
"""simple docstring"""
print(f'''RESTORING CHECKPOINT FROM {save_dir}''' ,end=" ... " )
with open(os.path.join(__UpperCamelCase ,"flax_model.msgpack" ) ,"rb" ) as f:
A_ = from_bytes(state.params ,f.read() )
with open(os.path.join(__UpperCamelCase ,"opt_state.msgpack" ) ,"rb" ) as f:
A_ = from_bytes(state.opt_state ,f.read() )
A_ = joblib.load(os.path.join(__UpperCamelCase ,"args.joblib" ) )
A_ = joblib.load(os.path.join(__UpperCamelCase ,"data_collator.joblib" ) )
with open(os.path.join(__UpperCamelCase ,"training_state.json" ) ,"r" ) as f:
A_ = json.load(__UpperCamelCase )
A_ = training_state["step"]
print("DONE" )
return params, opt_state, step, args, data_collator
def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : Any ,__UpperCamelCase : int ,__UpperCamelCase : Dict ):
"""simple docstring"""
A_ = num_train_steps - warmup_steps
A_ = optax.linear_schedule(init_value=__UpperCamelCase ,end_value=__UpperCamelCase ,transition_steps=__UpperCamelCase )
A_ = optax.linear_schedule(init_value=__UpperCamelCase ,end_value=1E-7 ,transition_steps=__UpperCamelCase )
A_ = optax.join_schedules(schedules=[warmup_fn, decay_fn] ,boundaries=[warmup_steps] )
return lr
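# Minimal sketch (added, illustrative): how the joined warmup+decay schedule
# built above behaves. The numbers below are hypothetical, not the script's args.
def _scheduler_demo() -> list:
    import optax

    warmup_fn = optax.linear_schedule(init_value=0.0, end_value=1e-3, transition_steps=100)
    decay_fn = optax.linear_schedule(init_value=1e-3, end_value=1e-7, transition_steps=900)
    schedule = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[100])
    # 0.0 at step 0, peak 1e-3 at step 100, then a linear decay towards 1e-7.
    return [float(schedule(step)) for step in (0, 50, 100, 550, 1000)]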
def __snake_case ( __UpperCamelCase : Tuple ,__UpperCamelCase : Tuple ,__UpperCamelCase : List[str] ,__UpperCamelCase : str ,__UpperCamelCase : Dict ):
"""simple docstring"""
def weight_decay_mask(__UpperCamelCase : int ):
A_ = traverse_util.flatten_dict(__UpperCamelCase )
A_ = {k: (v[-1] != "bias" and v[-2:] != ("LayerNorm", "scale")) for k, v in params.items()}
return traverse_util.unflatten_dict(__UpperCamelCase )
A_ = scheduler_fn(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
A_ = optax.adamw(learning_rate=__UpperCamelCase ,weight_decay=__UpperCamelCase ,mask=__UpperCamelCase )
return tx, lr
| 86 |
import os
import pytest
from attr import dataclass
__lowercase : Optional[int] = '''us-east-1''' # defaults region
@dataclass
class _A :
'''simple docstring'''
__lowerCamelCase : str
__lowerCamelCase : Dict = '''arn:aws:iam::558105141721:role/sagemaker_execution_role'''
__lowerCamelCase : Optional[Any] = {
'''task_name''': '''mnli''',
'''per_device_train_batch_size''': 1_6,
'''per_device_eval_batch_size''': 1_6,
'''do_train''': True,
'''do_eval''': True,
'''do_predict''': True,
'''output_dir''': '''/opt/ml/model''',
'''overwrite_output_dir''': True,
'''max_steps''': 5_0_0,
'''save_steps''': 5_5_0_0,
}
__lowerCamelCase : List[str] = {**hyperparameters, '''max_steps''': 1_0_0_0}
@property
def snake_case_ ( self ):
'''simple docstring'''
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
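# Descriptive note (added): SageMaker applies each "Regex" to the training logs
# and reports the first capture group as the metric named by "Name"; the \D*
# skips any non-digit characters between "=" and the numeric value.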
@property
def snake_case_ ( self ):
'''simple docstring'''
return F"""{self.framework}-transfromers-test"""
@property
def snake_case_ ( self ):
'''simple docstring'''
return F"""./tests/sagemaker/scripts/{self.framework}"""
@property
def snake_case_ ( self ):
'''simple docstring'''
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="""class""" )
def lowercase ( __A : List[str] ) -> List[str]:
'''simple docstring'''
snake_case : Optional[Any] = SageMakerTestEnvironment(framework=request.cls.framework )
| 36 | 0 |
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class UpperCamelCase_ :
'''simple docstring'''
def __init__( self : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int = 13 , UpperCAmelCase__ : int = 64 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 3 , UpperCAmelCase__ : int = 3 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : int = 128 , UpperCAmelCase__ : int=[16, 32, 64, 128] , UpperCAmelCase__ : int = 7 , UpperCAmelCase__ : int = 4 , UpperCAmelCase__ : int = 37 , UpperCAmelCase__ : str = "gelu" , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : int = 10 , UpperCAmelCase__ : float = 0.02 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : int = 128 , UpperCAmelCase__ : List[int] = [2, 2, 2, 2] , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 2 , ) ->List[str]:
'''simple docstring'''
A__ = parent
A__ = batch_size
A__ = image_size
A__ = patch_size
A__ = num_channels
A__ = is_training
A__ = use_labels
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = type_sequence_label_size
A__ = initializer_range
A__ = encoder_stride
A__ = num_attention_outputs
A__ = embed_dim
A__ = embed_dim + 1
A__ = resolution
A__ = depths
A__ = hidden_sizes
A__ = dim
A__ = mlp_expansion_ratio
def SCREAMING_SNAKE_CASE ( self : List[str]) ->str:
'''simple docstring'''
A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
A__ = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE ( self : str) ->Tuple:
'''simple docstring'''
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : str) ->Any:
'''simple docstring'''
A__ = TFEfficientFormerModel(config=UpperCAmelCase__)
A__ = model(UpperCAmelCase__ , training=UpperCAmelCase__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE ( self : Tuple , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int]) ->Optional[Any]:
'''simple docstring'''
A__ = self.type_sequence_label_size
A__ = TFEfficientFormerForImageClassification(UpperCAmelCase__)
A__ = model(UpperCAmelCase__ , labels=UpperCAmelCase__ , training=UpperCAmelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
A__ = 1
A__ = TFEfficientFormerForImageClassification(UpperCAmelCase__)
A__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
A__ = model(UpperCAmelCase__ , labels=UpperCAmelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def SCREAMING_SNAKE_CASE ( self : Dict) ->List[str]:
'''simple docstring'''
A__ = self.prepare_config_and_inputs()
A__ , A__ , A__ = config_and_inputs
A__ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__ = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
UpperCAmelCase__ = (
{
'''feature-extraction''': TFEfficientFormerModel,
'''image-classification''': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[Any]:
'''simple docstring'''
A__ = TFEfficientFormerModelTester(self)
A__ = ConfigTester(
self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ , hidden_size=37)
def SCREAMING_SNAKE_CASE ( self : List[str]) ->int:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''EfficientFormer does not use inputs_embeds''')
def SCREAMING_SNAKE_CASE ( self : Tuple) ->Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason='''EfficientFormer does not support input and output embeddings''')
def SCREAMING_SNAKE_CASE ( self : str) ->int:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Union[str, Any]:
'''simple docstring'''
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(UpperCAmelCase__)
A__ = inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ = [*signature.parameters.keys()]
A__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : List[str]) ->Optional[Any]:
'''simple docstring'''
def check_hidden_states_output(UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict):
A__ = model_class(UpperCAmelCase__)
A__ = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__) , training=UpperCAmelCase__)
A__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A__ = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1)
self.assertEqual(len(UpperCAmelCase__) , UpperCAmelCase__)
if hasattr(self.model_tester , '''encoder_seq_length'''):
A__ = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , '''chunk_length''') and self.model_tester.chunk_length > 1:
A__ = seq_length * self.model_tester.chunk_length
else:
A__ = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
A__ = outputs.decoder_hidden_states
self.assertIsInstance(UpperCAmelCase__ , (list, tuple))
self.assertEqual(len(UpperCAmelCase__) , UpperCAmelCase__)
A__ = getattr(self.model_tester , '''seq_length''' , UpperCAmelCase__)
A__ = getattr(self.model_tester , '''decoder_seq_length''' , UpperCAmelCase__)
self.assertListEqual(
list(hidden_states[-1].shape[-2:]) , [decoder_seq_length, self.model_tester.hidden_size] , )
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = True
check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A__ = True
check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Any=False) ->Tuple:
'''simple docstring'''
A__ = super()._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__)
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def SCREAMING_SNAKE_CASE ( self : List[str]) ->str:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__)
@unittest.skip(reason='''EfficientFormer does not implement masked image modeling yet''')
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Any:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Optional[int]:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__)
@slow
def SCREAMING_SNAKE_CASE ( self : Tuple) ->Optional[int]:
'''simple docstring'''
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = TFEfficientFormerModel.from_pretrained(UpperCAmelCase__)
self.assertIsNotNone(UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : int) ->Optional[Any]:
'''simple docstring'''
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = True
A__ = getattr(self.model_tester , '''seq_length''' , UpperCAmelCase__)
A__ = getattr(self.model_tester , '''encoder_seq_length''' , UpperCAmelCase__)
A__ = getattr(self.model_tester , '''key_length''' , UpperCAmelCase__)
A__ = getattr(self.model_tester , '''chunk_length''' , UpperCAmelCase__)
if chunk_length is not None and hasattr(self.model_tester , '''num_hashes'''):
A__ = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
A__ = True
A__ = False
A__ = True
A__ = model_class(UpperCAmelCase__)
A__ = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__) , training=UpperCAmelCase__)
A__ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCAmelCase__) , self.model_tester.num_attention_outputs)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
A__ = True
A__ = model_class(UpperCAmelCase__)
A__ = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__) , training=UpperCAmelCase__)
A__ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCAmelCase__) , self.model_tester.num_attention_outputs)
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:]) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def SCREAMING_SNAKE_CASE ( self : Dict) ->str:
'''simple docstring'''
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
A__ = model_class(UpperCAmelCase__)
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
A__ = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=UpperCAmelCase__)
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
A__ = model(UpperCAmelCase__)
self.assertTrue(outputs_dict is not None)
def SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
"""simple docstring"""
A__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]:
'''simple docstring'''
return (
EfficientFormerImageProcessor.from_pretrained('''snap-research/efficientformer-l1-300''')
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE ( self : str) ->Union[str, Any]:
'''simple docstring'''
A__ = TFEfficientFormerForImageClassification.from_pretrained('''snap-research/efficientformer-l1-300''')
A__ = self.default_image_processor
A__ = prepare_img()
A__ = image_processor(images=UpperCAmelCase__ , return_tensors='''tf''')
# forward pass
A__ = model(**UpperCAmelCase__ , training=UpperCAmelCase__)
# verify the logits
A__ = tf.TensorShape((1, 1_000))
self.assertEqual(outputs.logits.shape , UpperCAmelCase__)
A__ = tf.constant([-0.0555, 0.4825, -0.0852])
self.assertTrue(np.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1e-4))
@slow
def SCREAMING_SNAKE_CASE ( self : Tuple) ->List[str]:
'''simple docstring'''
A__ = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
'''snap-research/efficientformer-l1-300''')
A__ = self.default_image_processor
A__ = prepare_img()
A__ = image_processor(images=UpperCAmelCase__ , return_tensors='''tf''')
# forward pass
A__ = model(**UpperCAmelCase__ , training=UpperCAmelCase__)
# verify the logits
A__ = tf.TensorShape((1, 1_000))
self.assertEqual(outputs.logits.shape , UpperCAmelCase__)
A__ = tf.constant([-0.1312, 0.4353, -1.0499])
self.assertTrue(np.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1e-4))
| 87 |
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('''To use the rich extension, install rich with `pip install rich`''')
| 36 | 0 |
"""simple docstring"""
import numpy as np
def _snake_case ( __snake_case : np.ndarray ):
"""simple docstring"""
return 1 / (1 + np.exp(-vector ))
def _snake_case ( __snake_case : np.ndarray ):
"""simple docstring"""
return vector * sigmoid(__snake_case )
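# Self-contained sketch (added; the definitions above use obfuscated names):
# SiLU(x) = x * sigmoid(x), so SiLU(0) == 0 and SiLU(x) -> x for large positive x.
def _silu_demo(x: np.ndarray) -> np.ndarray:
    return x / (1 + np.exp(-x))  # x * sigmoid(x), folded into one expression
# _silu_demo(np.array([-1.0, 0.0, 1.0])) -> approx. [-0.2689, 0.0, 0.7311]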
if __name__ == "__main__":
import doctest
doctest.testmod()
| 88 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
__lowercase : Any = logging.getLogger(__name__)
@dataclass(frozen=snake_case )
class _A :
'''simple docstring'''
__lowerCamelCase : str
__lowerCamelCase : str
__lowerCamelCase : Optional[str] = None
__lowerCamelCase : Optional[str] = None
__lowerCamelCase : Optional[str] = None
@dataclass(frozen=snake_case )
class _A :
'''simple docstring'''
__lowerCamelCase : List[int]
__lowerCamelCase : Optional[List[int]] = None
__lowerCamelCase : Optional[List[int]] = None
__lowerCamelCase : Optional[Union[int, float]] = None
__lowerCamelCase : Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : List[InputFeatures]
def __init__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_=False ,SCREAMING_SNAKE_CASE_ = False ,):
'''simple docstring'''
snake_case : str = hans_processors[task]()
snake_case : str = os.path.join(
SCREAMING_SNAKE_CASE_ ,"""cached_{}_{}_{}_{}""".format(
"""dev""" if evaluate else """train""" ,tokenizer.__class__.__name__ ,str(SCREAMING_SNAKE_CASE_ ) ,SCREAMING_SNAKE_CASE_ ,) ,)
snake_case : Dict = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
snake_case , snake_case : List[Any] = label_list[2], label_list[1]
snake_case : List[Any] = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
snake_case : Any = cached_features_file + """.lock"""
with FileLock(SCREAMING_SNAKE_CASE_ ):
if os.path.exists(SCREAMING_SNAKE_CASE_ ) and not overwrite_cache:
logger.info(F"""Loading features from cached file {cached_features_file}""" )
snake_case : int = torch.load(SCREAMING_SNAKE_CASE_ )
else:
logger.info(F"""Creating features from dataset file at {data_dir}""" )
snake_case : Union[str, Any] = (
processor.get_dev_examples(SCREAMING_SNAKE_CASE_ ) if evaluate else processor.get_train_examples(SCREAMING_SNAKE_CASE_ )
)
logger.info("""Training examples: %s""" ,len(SCREAMING_SNAKE_CASE_ ) )
snake_case : Dict = hans_convert_examples_to_features(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
logger.info("""Saving features into cached file %s""" ,SCREAMING_SNAKE_CASE_ )
torch.save(self.features ,SCREAMING_SNAKE_CASE_ )
def __len__( self ):
'''simple docstring'''
return len(self.features )
def __getitem__( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return self.features[i]
def snake_case_ ( self ):
'''simple docstring'''
return self.label_list
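# Illustrative sketch (added): the RoBERTa/BART "label indices are swapped" hack
# above reorders MNLI-style labels so that checkpoints fine-tuned with
# ("contradiction", "neutral", "entailment") indices line up with this
# processor's ("contradiction", "entailment", "neutral") ordering:
#
#   label_list = ["contradiction", "entailment", "neutral"]
#   label_list[1], label_list[2] = label_list[2], label_list[1]
#   # -> ["contradiction", "neutral", "entailment"]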
if is_tf_available():
import tensorflow as tf
class _A :
'''simple docstring'''
__lowerCamelCase : List[InputFeatures]
def __init__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = 128 ,SCREAMING_SNAKE_CASE_=False ,SCREAMING_SNAKE_CASE_ = False ,):
'''simple docstring'''
snake_case : Any = hans_processors[task]()
snake_case : List[str] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
snake_case , snake_case : int = label_list[2], label_list[1]
snake_case : List[str] = label_list
snake_case : int = processor.get_dev_examples(SCREAMING_SNAKE_CASE_ ) if evaluate else processor.get_train_examples(SCREAMING_SNAKE_CASE_ )
snake_case : Any = hans_convert_examples_to_features(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) ,desc="""convert examples to features""" ):
if ex_index % 10000 == 0:
logger.info("""Writing example %d of %d""" % (ex_index, len(SCREAMING_SNAKE_CASE_ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
snake_case : Any = tf.data.Dataset.from_generator(
SCREAMING_SNAKE_CASE_ ,(
{
"""example_id""": tf.intaa,
"""input_ids""": tf.intaa,
"""attention_mask""": tf.intaa,
"""token_type_ids""": tf.intaa,
},
tf.intaa,
) ,(
{
"""example_id""": tf.TensorShape([] ),
"""input_ids""": tf.TensorShape([None, None] ),
"""attention_mask""": tf.TensorShape([None, None] ),
"""token_type_ids""": tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) ,)
def snake_case_ ( self ):
'''simple docstring'''
return self.dataset
def __len__( self ):
'''simple docstring'''
return len(self.features )
def __getitem__( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return self.features[i]
def snake_case_ ( self ):
'''simple docstring'''
return self.label_list
class _A ( snake_case ):
'''simple docstring'''
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return self._create_examples(self._read_tsv(os.path.join(SCREAMING_SNAKE_CASE_ ,"""heuristics_train_set.txt""" ) ) ,"""train""" )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return self._create_examples(self._read_tsv(os.path.join(SCREAMING_SNAKE_CASE_ ,"""heuristics_evaluation_set.txt""" ) ) ,"""dev""" )
def snake_case_ ( self ):
'''simple docstring'''
return ["contradiction", "entailment", "neutral"]
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : List[str] = []
for i, line in enumerate(SCREAMING_SNAKE_CASE_ ):
if i == 0:
continue
snake_case : Any = """%s-%s""" % (set_type, line[0])
snake_case : Optional[int] = line[5]
snake_case : Union[str, Any] = line[6]
snake_case : Optional[Any] = line[7][2:] if line[7].startswith("""ex""" ) else line[7]
snake_case : Dict = line[0]
examples.append(InputExample(guid=SCREAMING_SNAKE_CASE_ ,text_a=SCREAMING_SNAKE_CASE_ ,text_b=SCREAMING_SNAKE_CASE_ ,label=SCREAMING_SNAKE_CASE_ ,pairID=SCREAMING_SNAKE_CASE_ ) )
return examples
def lowercase ( __A : List[InputExample] , __A : List[str] , __A : int , __A : PreTrainedTokenizer , ) -> Tuple:
'''simple docstring'''
snake_case : List[Any] = {label: i for i, label in enumerate(__A )}
snake_case : Union[str, Any] = []
for ex_index, example in tqdm.tqdm(enumerate(__A ) , desc="""convert examples to features""" ):
if ex_index % 1_0000 == 0:
logger.info("""Writing example %d""" % (ex_index) )
snake_case : Union[str, Any] = tokenizer(
example.text_a , example.text_b , add_special_tokens=__A , max_length=__A , padding="""max_length""" , truncation=__A , return_overflowing_tokens=__A , )
snake_case : Tuple = label_map[example.label] if example.label in label_map else 0
snake_case : Tuple = int(example.pairID )
features.append(InputFeatures(**__A , label=__A , pairID=__A ) )
for i, example in enumerate(examples[:5] ):
logger.info("""*** Example ***""" )
logger.info(f"""guid: {example}""" )
logger.info(f"""features: {features[i]}""" )
return features
__lowercase : Dict = {
'''hans''': 3,
}
__lowercase : Union[str, Any] = {
'''hans''': HansProcessor,
}
| 36 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class _lowerCamelCase( unittest.TestCase ):
def __init__( self, lowerCamelCase, lowerCamelCase=7, lowerCamelCase=3, lowerCamelCase=18, lowerCamelCase=30, lowerCamelCase=4_00, lowerCamelCase=True, lowerCamelCase=None, lowerCamelCase=True, lowerCamelCase=False, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=[0.5, 0.5, 0.5], lowerCamelCase=[0.5, 0.5, 0.5], ) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Optional[int] = parent
_lowercase : Union[str, Any] = batch_size
_lowercase : List[Any] = num_channels
_lowercase : str = image_size
_lowercase : str = min_resolution
_lowercase : str = max_resolution
_lowercase : int = do_resize
_lowercase : Dict = size if size is not None else {'height': 18, 'width': 20}
_lowercase : Optional[int] = do_thumbnail
_lowercase : Optional[Any] = do_align_axis
_lowercase : Optional[Any] = do_pad
_lowercase : str = do_normalize
_lowercase : Any = image_mean
_lowercase : Tuple = image_std
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class _lowerCamelCase( _a, unittest.TestCase ):
lowercase_ : List[str] = DonutImageProcessor if is_vision_available() else None
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : int = DonutImageProcessingTester(self)
@property
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : Dict = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(lowerCamelCase, 'do_resize'))
self.assertTrue(hasattr(lowerCamelCase, 'size'))
self.assertTrue(hasattr(lowerCamelCase, 'do_thumbnail'))
self.assertTrue(hasattr(lowerCamelCase, 'do_align_long_axis'))
self.assertTrue(hasattr(lowerCamelCase, 'do_pad'))
self.assertTrue(hasattr(lowerCamelCase, 'do_normalize'))
self.assertTrue(hasattr(lowerCamelCase, 'image_mean'))
self.assertTrue(hasattr(lowerCamelCase, 'image_std'))
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size, {'height': 18, 'width': 20})
_lowercase : str = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
self.assertEqual(image_processor.size, {'height': 42, 'width': 42})
# Previous config had dimensions in (width, height) order
_lowercase : List[str] = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
self.assertEqual(image_processor.size, {'height': 84, 'width': 42})
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
pass
@is_flaky()
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase : Any = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
_lowercase : int = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase)
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, Image.Image)
# Test not batched input
_lowercase : List[str] = image_processing(image_inputs[0], return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
), )
# Test batched
_lowercase : Optional[int] = image_processing(lowerCamelCase, return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
), )
@is_flaky()
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : Dict = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
_lowercase : Optional[Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase, numpify=lowerCamelCase)
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, np.ndarray)
# Test not batched input
_lowercase : str = image_processing(image_inputs[0], return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
), )
# Test batched
_lowercase : Optional[Any] = image_processing(lowerCamelCase, return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
), )
@is_flaky()
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : Optional[Any] = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
_lowercase : Any = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase, torchify=lowerCamelCase)
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, torch.Tensor)
# Test not batched input
_lowercase : List[Any] = image_processing(image_inputs[0], return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
), )
# Test batched
_lowercase : Optional[int] = image_processing(lowerCamelCase, return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
), )
| 89 |
from __future__ import annotations
def lowercase ( __A : int ) -> list[int]:
'''simple docstring'''
snake_case : Dict = 2
snake_case : int = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(__A )
if n > 1:
factors.append(__A )
return factors
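# Self-contained sketch (added; the function above uses obfuscated names): trial
# division divides out each factor as soon as it is found, so the primes come
# back in non-decreasing order.
def _prime_factors_demo(n: int) -> list[int]:
    i, factors = 2, []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
# _prime_factors_demo(360) -> [2, 2, 2, 3, 3, 5], since 360 = 2**3 * 3**2 * 5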
if __name__ == "__main__":
import doctest
doctest.testmod()
| 36 | 0 |
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
__UpperCAmelCase = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : bool = field(default=a__ , metadata={"help": "Whether to use SortishSampler or not."} )
lowercase__ : bool = field(
default=a__ , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
lowercase__ : Optional[int] = field(
default=a__ , metadata={
"help": (
"The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `max_length` value of the model configuration."
)
} , )
lowercase__ : Optional[int] = field(
default=a__ , metadata={
"help": (
"The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `num_beams` value of the model configuration."
)
} , )
lowercase__ : Optional[Union[str, Path, GenerationConfig]] = field(
default=a__ , metadata={
"help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
} , )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
lowerCAmelCase__ = super().to_dict()
for k, v in d.items():
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
lowerCAmelCase__ = v.to_dict()
return d
| 90 |
import numpy as np
def lowercase ( __A : np.array ) -> np.array:
'''simple docstring'''
return (2 / (1 + np.exp(-2 * vector ))) - 1
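# Descriptive note (added): the expression above is algebraically tanh(x), since
# tanh(x) = 2 * sigmoid(2x) - 1; np.tanh returns the same values directly:
#   np.allclose(np.tanh(v), (2 / (1 + np.exp(-2 * v))) - 1)  # -> True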
if __name__ == "__main__":
import doctest
doctest.testmod()
| 36 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_lowercase = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''NllbTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''NllbTokenizerFast''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 91 |
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
__lowercase : Optional[int] = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['''memory_attention''', '''encoder_attn'''],
['''attention''', '''attn'''],
['''/''', '''.'''],
['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
['''.LayerNorm.beta''', '''_layer_norm.bias'''],
['''r.layer_''', '''r.layers.'''],
['''output_proj''', '''out_proj'''],
['''ffn.dense_1.''', '''fc2.'''],
['''ffn.dense.''', '''fc1.'''],
['''ffn_layer_norm''', '''final_layer_norm'''],
['''kernel''', '''weight'''],
['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
['''embeddings.weights''', '''shared.weight'''],
]
def lowercase ( __A : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
for pegasus_name, hf_name in PATTERNS:
snake_case : Dict = k.replace(__A , __A )
return k
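# Illustrative trace (added, hypothetical key): applying the ordered PATTERNS
# replacements above to "encoder/memory_attention/output_proj/kernel" gives
#   -> "encoder/encoder_attn/output_proj/kernel"  (memory_attention -> encoder_attn)
#   -> "encoder.encoder_attn.output_proj.kernel"  ("/" -> ".")
#   -> "encoder.encoder_attn.out_proj.kernel"     (output_proj -> out_proj)
#   -> "encoder.encoder_attn.out_proj.weight"     (kernel -> weight)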
def lowercase ( __A : dict , __A : dict ) -> PegasusForConditionalGeneration:
'''simple docstring'''
snake_case : Dict = DEFAULTS.copy()
cfg_kwargs.update(__A )
snake_case : int = PegasusConfig(**__A )
snake_case : List[Any] = PegasusForConditionalGeneration(__A )
snake_case : Optional[Any] = torch_model.model.state_dict()
snake_case : Optional[int] = {}
for k, v in tf_weights.items():
snake_case : str = rename_state_dict_key(__A )
if new_k not in sd:
raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" )
if "dense" in k or "proj" in new_k:
snake_case : Optional[Any] = v.T
snake_case : List[Any] = torch.tensor(__A , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, f"""{new_k}, {k}, {v.shape}, {sd[new_k].shape}"""
# make sure embedding.padding_idx is respected
snake_case : List[str] = torch.zeros_like(mapping["""shared.weight"""][cfg.pad_token_id + 1] )
snake_case : Optional[Any] = mapping["""shared.weight"""]
snake_case : Optional[Any] = mapping["""shared.weight"""]
snake_case : Tuple = {k: torch.zeros_like(__A ) for k, v in sd.items() if k.endswith("""bias""" ) and k not in mapping}
mapping.update(**__A )
snake_case , snake_case : Union[str, Any] = torch_model.model.load_state_dict(__A , strict=__A )
snake_case : Union[str, Any] = [
k for k in missing if k not in ["""encoder.embed_positions.weight""", """decoder.embed_positions.weight"""]
]
assert unexpected_missing == [], f"""no matches found for the following torch keys {unexpected_missing}"""
assert extra == [], f"""no matches found for the following tf keys {extra}"""
return torch_model
def lowercase ( __A : int="./ckpt/aeslc/model.ckpt-32000" ) -> Dict:
'''simple docstring'''
snake_case : Optional[Any] = tf.train.list_variables(__A )
snake_case : Union[str, Any] = {}
snake_case : List[str] = ["""Adafactor""", """global_step"""]
for name, shape in tqdm(__A , desc="""converting tf checkpoint to dict""" ):
snake_case : str = any(pat in name for pat in ignore_name )
if skip_key:
continue
snake_case : List[str] = tf.train.load_variable(__A , __A )
snake_case : Optional[Any] = array
return tf_weights
def lowercase ( __A : str , __A : str ) -> Optional[int]:
'''simple docstring'''
snake_case : Dict = Path(__A ).parent.name
snake_case : Dict = task_specific_params[f"""summarization_{dataset}"""]["""max_position_embeddings"""]
snake_case : Any = PegasusTokenizer.from_pretrained("""sshleifer/pegasus""" , model_max_length=__A )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(__A )
# convert model
snake_case : Dict = get_tf_weights_as_numpy(__A )
snake_case : List[Any] = task_specific_params[f"""summarization_{dataset}"""]
if dataset == "large":
snake_case : Optional[int] = task_specific_params
snake_case : Optional[int] = convert_pegasus(__A , __A )
torch_model.save_pretrained(__A )
snake_case : int = torch_model.state_dict()
sd.pop("""model.decoder.embed_positions.weight""" )
sd.pop("""model.encoder.embed_positions.weight""" )
torch.save(__A , Path(__A ) / """pytorch_model.bin""" )
if __name__ == "__main__":
__lowercase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
__lowercase : List[Any] = parser.parse_args()
if args.save_dir is None:
__lowercase : Optional[Any] = Path(args.tf_ckpt_path).parent.name
__lowercase : Union[str, Any] = os.path.join('''pegasus''', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 36 | 0 |
'''simple docstring'''
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 92 |
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class _A ( pl.LightningModule ):
'''simple docstring'''
def __init__( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
super().__init__()
snake_case : Dict = model
snake_case : Optional[int] = 2
snake_case : Optional[Any] = nn.Linear(self.model.config.hidden_size ,self.num_labels )
def snake_case_ ( self ):
'''simple docstring'''
pass
def lowercase ( __A : str , __A : str , __A : str ) -> Optional[Any]:
'''simple docstring'''
snake_case : Optional[Any] = LongformerModel.from_pretrained(__A )
snake_case : Tuple = LightningModel(__A )
snake_case : Optional[int] = torch.load(__A , map_location=torch.device("""cpu""" ) )
lightning_model.load_state_dict(ckpt["""state_dict"""] )
# init longformer question answering model
snake_case : Dict = LongformerForQuestionAnswering.from_pretrained(__A )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(__A )
print(f"""Conversion successful. Model saved under {pytorch_dump_folder_path}""" )
if __name__ == "__main__":
__lowercase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--longformer_model''',
default=None,
type=str,
required=True,
help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
)
parser.add_argument(
'''--longformer_question_answering_ckpt_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch Lightning Checkpoint.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__lowercase : List[str] = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 36 | 0 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
__A = """Create a default config file for Accelerate with only a few flags set."""
def __A (_SCREAMING_SNAKE_CASE="no" , _SCREAMING_SNAKE_CASE = default_json_config_file , _SCREAMING_SNAKE_CASE = False ) ->List[str]:
"""simple docstring"""
lowerCAmelCase__ :int = Path(_SCREAMING_SNAKE_CASE )
path.parent.mkdir(parents=_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
if path.exists():
print(
F"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`." )
return False
lowerCAmelCase__ :Tuple = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}" )
lowerCAmelCase__ :Union[str, Any] = {
'compute_environment': 'LOCAL_MACHINE',
'mixed_precision': mixed_precision,
}
if torch.cuda.is_available():
lowerCAmelCase__ :str = torch.cuda.device_count()
lowerCAmelCase__ :Any = num_gpus
lowerCAmelCase__ :Tuple = False
if num_gpus > 1:
lowerCAmelCase__ :int = 'MULTI_GPU'
else:
lowerCAmelCase__ :int = 'NO'
elif is_xpu_available() and use_xpu:
lowerCAmelCase__ :Optional[Any] = torch.xpu.device_count()
lowerCAmelCase__ :Tuple = num_xpus
lowerCAmelCase__ :List[str] = False
if num_xpus > 1:
lowerCAmelCase__ :Any = 'MULTI_XPU'
else:
lowerCAmelCase__ :List[str] = 'NO'
elif is_npu_available():
lowerCAmelCase__ :Optional[int] = torch.npu.device_count()
lowerCAmelCase__ :Union[str, Any] = num_npus
lowerCAmelCase__ :Optional[Any] = False
if num_npus > 1:
lowerCAmelCase__ :Dict = 'MULTI_NPU'
else:
lowerCAmelCase__ :int = 'NO'
else:
lowerCAmelCase__ :List[Any] = 0
lowerCAmelCase__ :Union[str, Any] = True
lowerCAmelCase__ :str = 1
lowerCAmelCase__ :Optional[Any] = 'NO'
lowerCAmelCase__ :Optional[int] = ClusterConfig(**_SCREAMING_SNAKE_CASE )
config.to_json_file(_SCREAMING_SNAKE_CASE )
return path
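# Descriptive note (added): on a hypothetical single-GPU machine the file written
# above would contain roughly
#   {"compute_environment": "LOCAL_MACHINE", "mixed_precision": "no", ...}
# with the distributed type chosen by the CUDA/XPU/NPU probing above ("NO" for
# one device, "MULTI_GPU"/"MULTI_XPU"/"MULTI_NPU" for several) and the remaining
# fields filled in by ClusterConfig.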
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->List[Any]:
"""simple docstring"""
lowerCAmelCase__ :Dict = parser.add_parser('default' , parents=_SCREAMING_SNAKE_CASE , help=_SCREAMING_SNAKE_CASE , formatter_class=_SCREAMING_SNAKE_CASE )
parser.add_argument(
'--config_file' , default=_SCREAMING_SNAKE_CASE , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , dest='save_location' , )
parser.add_argument(
'--mixed_precision' , choices=['no', 'fp16', 'bf16'] , type=_SCREAMING_SNAKE_CASE , help='Whether or not to use mixed precision training. '
'Choose between FP16 and BF16 (bfloat16) training. '
'BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.' , default='no' , )
parser.set_defaults(func=_SCREAMING_SNAKE_CASE )
return parser
def __A (_SCREAMING_SNAKE_CASE ) ->List[Any]:
"""simple docstring"""
lowerCAmelCase__ :Any = write_basic_config(args.mixed_precision , args.save_location )
if config_file:
print(F"accelerate configuration saved at {config_file}" )
| 93 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
__lowercase : Optional[Any] = re.compile(r'''\b(a|an|the)\b''', re.UNICODE)
__lowercase : Optional[int] = None
def lowercase ( ) -> Optional[Any]:
'''simple docstring'''
snake_case : int = argparse.ArgumentParser("""Official evaluation script for SQuAD version 2.0.""" )
parser.add_argument("""data_file""" , metavar="""data.json""" , help="""Input data JSON file.""" )
parser.add_argument("""pred_file""" , metavar="""pred.json""" , help="""Model predictions.""" )
parser.add_argument(
"""--out-file""" , """-o""" , metavar="""eval.json""" , help="""Write accuracy metrics to file (default is stdout).""" )
parser.add_argument(
"""--na-prob-file""" , """-n""" , metavar="""na_prob.json""" , help="""Model estimates of probability of no answer.""" )
parser.add_argument(
"""--na-prob-thresh""" , """-t""" , type=__A , default=1.0 , help="""Predict \"\" if no-answer probability exceeds this (default = 1.0).""" , )
parser.add_argument(
"""--out-image-dir""" , """-p""" , metavar="""out_images""" , default=__A , help="""Save precision-recall curves to directory.""" )
parser.add_argument("""--verbose""" , """-v""" , action="""store_true""" )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def lowercase ( __A : Union[str, Any] ) -> int:
'''simple docstring'''
snake_case : Any = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
snake_case : int = bool(qa["""answers"""]["""text"""] )
return qid_to_has_ans
def lowercase ( __A : int ) -> Optional[int]:
'''simple docstring'''
def remove_articles(__A : List[Any] ):
return ARTICLES_REGEX.sub(""" """ , __A )
def white_space_fix(__A : Union[str, Any] ):
return " ".join(text.split() )
def remove_punc(__A : Tuple ):
snake_case : Optional[Any] = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(__A : Any ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(__A ) ) ) )
def lowercase ( __A : List[str] ) -> Union[str, Any]:
'''simple docstring'''
if not s:
return []
return normalize_answer(__A ).split()
def lowercase ( __A : Optional[int] , __A : int ) -> List[Any]:
'''simple docstring'''
return int(normalize_answer(__A ) == normalize_answer(__A ) )
def lowercase ( __A : Any , __A : Optional[Any] ) -> List[str]:
'''simple docstring'''
snake_case : Tuple = get_tokens(__A )
snake_case : str = get_tokens(__A )
snake_case : Dict = collections.Counter(__A ) & collections.Counter(__A )
snake_case : Optional[int] = sum(common.values() )
if len(__A ) == 0 or len(__A ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
snake_case : List[Any] = 1.0 * num_same / len(__A )
snake_case : int = 1.0 * num_same / len(__A )
snake_case : Dict = (2 * precision * recall) / (precision + recall)
return fa
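# Worked example (added, self-contained): gold "the cat sat" vs. prediction
# "cat sat down". Normalization drops the article "the", leaving two shared
# tokens, so precision = 2/3, recall = 2/2 and F1 = 2*(2/3)*1 / (2/3 + 1) = 0.8.
def _f1_demo() -> float:
    gold, pred = ["cat", "sat"], ["cat", "sat", "down"]
    common = collections.Counter(gold) & collections.Counter(pred)
    num_same = sum(common.values())
    precision, recall = num_same / len(pred), num_same / len(gold)
    return 2 * precision * recall / (precision + recall)  # -> 0.8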
def lowercase ( __A : List[Any] , __A : int ) -> str:
'''simple docstring'''
snake_case : Tuple = {}
snake_case : Tuple = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
snake_case : str = qa["""id"""]
snake_case : Union[str, Any] = [t for t in qa["""answers"""]["""text"""] if normalize_answer(__A )]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
snake_case : Optional[Any] = [""""""]
if qid not in preds:
print(f"""Missing prediction for {qid}""" )
continue
snake_case : Dict = preds[qid]
# Take max over all gold answers
snake_case : Union[str, Any] = max(compute_exact(__A , __A ) for a in gold_answers )
snake_case : Optional[int] = max(compute_fa(__A , __A ) for a in gold_answers )
return exact_scores, fa_scores
def lowercase ( __A : str , __A : Any , __A : List[Any] , __A : List[Any] ) -> Dict:
'''simple docstring'''
snake_case : Optional[int] = {}
for qid, s in scores.items():
snake_case : Any = na_probs[qid] > na_prob_thresh
if pred_na:
snake_case : str = float(not qid_to_has_ans[qid] )
else:
snake_case : List[Any] = s
return new_scores
def lowercase ( __A : Dict , __A : Union[str, Any] , __A : List[str]=None ) -> int:
'''simple docstring'''
if not qid_list:
snake_case : List[str] = len(__A )
return collections.OrderedDict(
[
("""exact""", 100.0 * sum(exact_scores.values() ) / total),
("""f1""", 100.0 * sum(fa_scores.values() ) / total),
("""total""", total),
] )
else:
snake_case : Any = len(__A )
return collections.OrderedDict(
[
("""exact""", 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
("""f1""", 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
("""total""", total),
] )
def lowercase ( __A : Optional[Any] , __A : Tuple , __A : List[str] ) -> Optional[Any]:
'''simple docstring'''
for k in new_eval:
snake_case : str = new_eval[k]
def lowercase ( __A : Tuple , __A : int , __A : Dict , __A : Dict ) -> int:
'''simple docstring'''
plt.step(__A , __A , color="""b""" , alpha=0.2 , where="""post""" )
plt.fill_between(__A , __A , step="""post""" , alpha=0.2 , color="""b""" )
plt.xlabel("""Recall""" )
plt.ylabel("""Precision""" )
plt.xlim([0.0, 1.05] )
plt.ylim([0.0, 1.05] )
plt.title(__A )
plt.savefig(__A )
plt.clf()
def lowercase ( __A : Optional[Any] , __A : Union[str, Any] , __A : Dict , __A : Tuple , __A : Optional[Any]=None , __A : List[str]=None ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Optional[int] = sorted(__A , key=lambda __A : na_probs[k] )
snake_case : Any = 0.0
snake_case : str = 1.0
snake_case : Tuple = 0.0
snake_case : str = [1.0]
snake_case : Any = [0.0]
snake_case : Dict = 0.0
for i, qid in enumerate(__A ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
snake_case : str = true_pos / float(i + 1 )
snake_case : List[str] = true_pos / float(__A )
if i == len(__A ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(__A )
recalls.append(__A )
if out_image:
plot_pr_curve(__A , __A , __A , __A )
return {"ap": 100.0 * avg_prec}
def lowercase ( __A : Any , __A : Optional[int] , __A : Tuple , __A : Tuple , __A : List[Any] , __A : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
if out_image_dir and not os.path.exists(__A ):
os.makedirs(__A )
snake_case : Tuple = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
snake_case : str = make_precision_recall_eval(
__A , __A , __A , __A , out_image=os.path.join(__A , """pr_exact.png""" ) , title="""Precision-Recall curve for Exact Match score""" , )
snake_case : int = make_precision_recall_eval(
__A , __A , __A , __A , out_image=os.path.join(__A , """pr_f1.png""" ) , title="""Precision-Recall curve for F1 score""" , )
snake_case : Dict = {k: float(__A ) for k, v in qid_to_has_ans.items()}
snake_case : int = make_precision_recall_eval(
__A , __A , __A , __A , out_image=os.path.join(__A , """pr_oracle.png""" ) , title="""Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)""" , )
merge_eval(__A , __A , """pr_exact""" )
merge_eval(__A , __A , """pr_f1""" )
merge_eval(__A , __A , """pr_oracle""" )
def lowercase ( __A : List[Any] , __A : Union[str, Any] , __A : Union[str, Any] , __A : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
if not qid_list:
return
snake_case : int = [na_probs[k] for k in qid_list]
snake_case : List[str] = np.ones_like(__A ) / float(len(__A ) )
plt.hist(__A , weights=__A , bins=20 , range=(0.0, 1.0) )
plt.xlabel("""Model probability of no-answer""" )
plt.ylabel("""Proportion of dataset""" )
plt.title(f"""Histogram of no-answer probability: {name}""" )
plt.savefig(os.path.join(__A , f"""na_prob_hist_{name}.png""" ) )
plt.clf()
def lowercase ( __A : List[Any] , __A : Tuple , __A : Tuple , __A : Any ) -> Dict:
'''simple docstring'''
snake_case : Optional[int] = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
snake_case : str = num_no_ans
snake_case : Optional[Any] = cur_score
snake_case : Optional[Any] = 0.0
snake_case : List[Any] = sorted(__A , key=lambda __A : na_probs[k] )
for i, qid in enumerate(__A ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
snake_case : Dict = scores[qid]
else:
if preds[qid]:
snake_case : Dict = -1
else:
snake_case : str = 0
cur_score += diff
if cur_score > best_score:
snake_case : Union[str, Any] = cur_score
snake_case : List[Any] = na_probs[qid]
return 100.0 * best_score / len(__A ), best_thresh
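# Descriptive note (added): the sweep above visits questions in order of
# increasing no-answer probability; each prefix corresponds to answering only
# the questions the model is most confident about, and the probability at the
# prefix with the best cumulative score becomes the returned threshold.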
def lowercase ( __A : Dict , __A : str , __A : str , __A : int , __A : str , __A : Any ) -> List[str]:
'''simple docstring'''
snake_case , snake_case : Optional[int] = find_best_thresh(__A , __A , __A , __A )
snake_case , snake_case : str = find_best_thresh(__A , __A , __A , __A )
snake_case : List[str] = best_exact
snake_case : List[Any] = exact_thresh
snake_case : Optional[Any] = best_fa
snake_case : Optional[int] = fa_thresh
def lowercase ( ) -> Any:
'''simple docstring'''
with open(OPTS.data_file ) as f:
snake_case : Dict = json.load(__A )
snake_case : Union[str, Any] = dataset_json["""data"""]
with open(OPTS.pred_file ) as f:
snake_case : int = json.load(__A )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
snake_case : Any = json.load(__A )
else:
snake_case : Any = {k: 0.0 for k in preds}
snake_case : Optional[int] = make_qid_to_has_ans(__A ) # maps qid to True/False
snake_case : Dict = [k for k, v in qid_to_has_ans.items() if v]
snake_case : Optional[int] = [k for k, v in qid_to_has_ans.items() if not v]
snake_case , snake_case : Optional[Any] = get_raw_scores(__A , __A )
snake_case : Tuple = apply_no_ans_threshold(__A , __A , __A , OPTS.na_prob_thresh )
snake_case : Optional[Any] = apply_no_ans_threshold(__A , __A , __A , OPTS.na_prob_thresh )
snake_case : Optional[int] = make_eval_dict(__A , __A )
if has_ans_qids:
snake_case : Any = make_eval_dict(__A , __A , qid_list=__A )
merge_eval(__A , __A , """HasAns""" )
if no_ans_qids:
snake_case : str = make_eval_dict(__A , __A , qid_list=__A )
merge_eval(__A , __A , """NoAns""" )
if OPTS.na_prob_file:
find_all_best_thresh(__A , __A , __A , __A , __A , __A )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(__A , __A , __A , __A , __A , OPTS.out_image_dir )
histogram_na_prob(__A , __A , OPTS.out_image_dir , """hasAns""" )
histogram_na_prob(__A , __A , OPTS.out_image_dir , """noAns""" )
if OPTS.out_file:
with open(OPTS.out_file , """w""" ) as f:
json.dump(__A , __A )
else:
print(json.dumps(__A , indent=2 ) )
if __name__ == "__main__":
__lowercase : Union[str, Any] = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('''Agg''')
import matplotlib.pyplot as plt
    lowercase()
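# Example invocation (the flag spellings are assumptions inferred from the
# OPTS attributes used above; check parse_args() for the exact names):
#   python evaluate_squad.py dev-v2.0.json predictions.json \
#       --na-prob-file na_probs.json --out-file eval.json --out-image-dir pr/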
| 36 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class UpperCAmelCase_ ( __A , __A , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCamelCase_ = (
{
'''feature-extraction''': TFMobileBertModel,
'''fill-mask''': TFMobileBertForMaskedLM,
'''question-answering''': TFMobileBertForQuestionAnswering,
'''text-classification''': TFMobileBertForSequenceClassification,
'''token-classification''': TFMobileBertForTokenClassification,
'''zero-shot''': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase_ = False
UpperCamelCase_ = False
def A__ ( self : List[str] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Union[str, Any]=False ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Dict =super()._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase )
if return_labels:
if model_class in get_values(UpperCAmelCase ):
lowercase : Union[str, Any] =tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
return inputs_dict
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
def __init__( self : str , UpperCAmelCase : Optional[int] , UpperCAmelCase : Any=13 , UpperCAmelCase : int=7 , UpperCAmelCase : Any=True , UpperCAmelCase : int=True , UpperCAmelCase : Optional[Any]=True , UpperCAmelCase : Dict=True , UpperCAmelCase : Tuple=99 , UpperCAmelCase : Any=32 , UpperCAmelCase : str=32 , UpperCAmelCase : Tuple=2 , UpperCAmelCase : Tuple=4 , UpperCAmelCase : Any=37 , UpperCAmelCase : List[Any]="gelu" , UpperCAmelCase : Tuple=0.1 , UpperCAmelCase : List[str]=0.1 , UpperCAmelCase : Any=512 , UpperCAmelCase : Optional[Any]=16 , UpperCAmelCase : Tuple=2 , UpperCAmelCase : Any=0.0_2 , UpperCAmelCase : Optional[Any]=3 , UpperCAmelCase : Tuple=4 , UpperCAmelCase : List[str]=None , ) -> int:
'''simple docstring'''
lowercase : Dict =parent
lowercase : Optional[int] =batch_size
lowercase : Optional[Any] =seq_length
lowercase : Tuple =is_training
lowercase : Dict =use_input_mask
lowercase : Any =use_token_type_ids
lowercase : int =use_labels
lowercase : int =vocab_size
lowercase : Dict =hidden_size
lowercase : Tuple =num_hidden_layers
lowercase : Optional[int] =num_attention_heads
lowercase : Dict =intermediate_size
lowercase : Tuple =hidden_act
lowercase : str =hidden_dropout_prob
lowercase : Optional[Any] =attention_probs_dropout_prob
lowercase : Any =max_position_embeddings
lowercase : List[Any] =type_vocab_size
lowercase : List[str] =type_sequence_label_size
lowercase : int =initializer_range
lowercase : int =num_labels
lowercase : Optional[int] =num_choices
lowercase : int =scope
lowercase : List[str] =embedding_size
def A__ ( self : Any ) -> List[Any]:
'''simple docstring'''
lowercase : Any =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : int =None
if self.use_input_mask:
lowercase : Optional[int] =random_attention_mask([self.batch_size, self.seq_length] )
lowercase : List[Any] =None
if self.use_token_type_ids:
lowercase : Dict =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase : int =None
lowercase : Optional[Any] =None
lowercase : Optional[Any] =None
if self.use_labels:
lowercase : Union[str, Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase : Dict =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase : Dict =ids_tensor([self.batch_size] , self.num_choices )
lowercase : Dict =MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ ( self : Any , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : List[str] ) -> List[Any]:
'''simple docstring'''
lowercase : int =TFMobileBertModel(config=UpperCAmelCase )
lowercase : Optional[int] ={'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase : List[Any] =model(UpperCAmelCase )
lowercase : Optional[Any] =[input_ids, input_mask]
lowercase : Union[str, Any] =model(UpperCAmelCase )
lowercase : List[Any] =model(UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def A__ ( self : int , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Any , UpperCAmelCase : List[str] , UpperCAmelCase : Tuple , UpperCAmelCase : int , UpperCAmelCase : str , UpperCAmelCase : Dict ) -> Dict:
'''simple docstring'''
lowercase : Optional[Any] =TFMobileBertForMaskedLM(config=UpperCAmelCase )
lowercase : Union[str, Any] ={'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase : Any =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A__ ( self : Optional[int] , UpperCAmelCase : Dict , UpperCAmelCase : int , UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : int , UpperCAmelCase : Optional[int] , UpperCAmelCase : Any ) -> Optional[int]:
'''simple docstring'''
lowercase : Dict =TFMobileBertForNextSentencePrediction(config=UpperCAmelCase )
lowercase : Union[str, Any] ={'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase : Optional[int] =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def A__ ( self : Tuple , UpperCAmelCase : int , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] ) -> Dict:
'''simple docstring'''
lowercase : Dict =TFMobileBertForPreTraining(config=UpperCAmelCase )
lowercase : Dict ={'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase : List[str] =model(UpperCAmelCase )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def A__ ( self : Dict , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : Optional[int] , UpperCAmelCase : Any ) -> str:
'''simple docstring'''
lowercase : List[Any] =self.num_labels
lowercase : Tuple =TFMobileBertForSequenceClassification(config=UpperCAmelCase )
lowercase : int ={'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase : int =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A__ ( self : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Any , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[Any] ) -> List[str]:
'''simple docstring'''
lowercase : List[Any] =self.num_choices
lowercase : Tuple =TFMobileBertForMultipleChoice(config=UpperCAmelCase )
lowercase : Union[str, Any] =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase : Optional[int] =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase : Dict =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase : Optional[int] ={
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
lowercase : Tuple =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
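    # Shape note for the tile/expand_dims pattern above: a (batch, seq) tensor
    # is expanded to (batch, 1, seq) and tiled to (batch, num_choices, seq),
    # so every choice sees the same ids. Standalone check (illustrative values):
    #   x = tf.zeros((13, 7), dtype=tf.int32)
    #   y = tf.tile(tf.expand_dims(x, 1), (1, 4, 1))   # -> shape (13, 4, 7)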
def A__ ( self : List[str] , UpperCAmelCase : Any , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : int , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] ) -> int:
'''simple docstring'''
lowercase : Dict =self.num_labels
lowercase : List[str] =TFMobileBertForTokenClassification(config=UpperCAmelCase )
lowercase : int ={'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase : int =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A__ ( self : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : Tuple , UpperCAmelCase : Any , UpperCAmelCase : List[str] , UpperCAmelCase : int , UpperCAmelCase : Dict , UpperCAmelCase : List[str] ) -> List[Any]:
'''simple docstring'''
lowercase : Optional[Any] =TFMobileBertForQuestionAnswering(config=UpperCAmelCase )
lowercase : int ={'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase : Tuple =model(UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
        config_and_inputs =self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) =config_and_inputs
        inputs_dict ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
def A__ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
lowercase : str =TFMobileBertModelTest.TFMobileBertModelTester(self )
lowercase : Any =ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 )
def A__ ( self : List[str] ) -> int:
'''simple docstring'''
self.config_tester.run_common_tests()
def A__ ( self : Tuple ) -> str:
'''simple docstring'''
lowercase : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*UpperCAmelCase )
def A__ ( self : Tuple ) -> int:
'''simple docstring'''
lowercase : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*UpperCAmelCase )
def A__ ( self : str ) -> Tuple:
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*UpperCAmelCase )
def A__ ( self : int ) -> int:
'''simple docstring'''
lowercase : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*UpperCAmelCase )
def A__ ( self : Any ) -> int:
'''simple docstring'''
lowercase : Tuple =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*UpperCAmelCase )
def A__ ( self : Dict ) -> int:
'''simple docstring'''
lowercase : Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*UpperCAmelCase )
def A__ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
lowercase : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*UpperCAmelCase )
def A__ ( self : Any ) -> List[Any]:
'''simple docstring'''
lowercase : Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*UpperCAmelCase )
@slow
def A__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
for model_name in ["google/mobilebert-uncased"]:
lowercase : Any =TFMobileBertModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def A__ ( self : Any ) -> Dict:
'''simple docstring'''
lowercase : Any =TFMobileBertForPreTraining.from_pretrained('''google/mobilebert-uncased''' )
lowercase : Optional[Any] =tf.constant([[0, 1, 2, 3, 4, 5]] )
lowercase : Dict =model(UpperCAmelCase )[0]
lowercase : Optional[int] =[1, 6, 3_0522]
self.assertEqual(output.shape , UpperCAmelCase )
lowercase : Dict =tf.constant(
[
[
[-4.5_9_1_9_5_4_7, -9.2_4_8_2_9_5, -9.6_4_5_2_5_6],
[-6.7_3_0_6_1_7_5, -6.4_4_0_2_8_4, -6.6_0_5_2_8_3_7],
[-7.2_7_4_3_5_0_6, -6.7_8_4_7_9_1_5, -6.0_2_4_6_7_3],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase , atol=1e-4 )
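# The atol=1e-4 slice check above is the usual "golden values" pattern: only a
# 3x3 corner of the logits is pinned, loosely enough to survive kernel-level
# nondeterminism. Standalone illustration (numbers are made up):
#   a = tf.constant([1.00000, 2.00000]); b = tf.constant([1.00005, 2.00005])
#   tf.debugging.assert_near(a, b, atol=1e-4)   # passes; a tighter atol raises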
| 94 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
__lowercase : Dict = logging.get_logger(__name__)
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : Optional[int] = ['''pixel_values''']
def __init__( self ,SCREAMING_SNAKE_CASE_ = True ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = PILImageResampling.BILINEAR ,SCREAMING_SNAKE_CASE_ = True ,SCREAMING_SNAKE_CASE_ = 1 / 255 ,SCREAMING_SNAKE_CASE_ = True ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = True ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE_ )
snake_case : List[Any] = size if size is not None else {"""shortest_edge""": 224}
snake_case : str = get_size_dict(SCREAMING_SNAKE_CASE_ ,default_to_square=SCREAMING_SNAKE_CASE_ )
snake_case : str = crop_size if crop_size is not None else {"""height""": 256, """width""": 256}
snake_case : Optional[Any] = get_size_dict(SCREAMING_SNAKE_CASE_ ,param_name="""crop_size""" )
snake_case : Optional[Any] = do_resize
snake_case : Union[str, Any] = size
snake_case : Dict = resample
snake_case : Dict = do_rescale
snake_case : Dict = rescale_factor
snake_case : List[str] = do_center_crop
snake_case : Dict = crop_size
snake_case : Any = do_flip_channel_order
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = PIL.Image.BILINEAR ,SCREAMING_SNAKE_CASE_ = None ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
snake_case : str = get_size_dict(SCREAMING_SNAKE_CASE_ ,default_to_square=SCREAMING_SNAKE_CASE_ )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}""" )
snake_case : List[Any] = get_resize_output_image_size(SCREAMING_SNAKE_CASE_ ,size=size["""shortest_edge"""] ,default_to_square=SCREAMING_SNAKE_CASE_ )
return resize(SCREAMING_SNAKE_CASE_ ,size=SCREAMING_SNAKE_CASE_ ,resample=SCREAMING_SNAKE_CASE_ ,data_format=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
snake_case : Union[str, Any] = get_size_dict(SCREAMING_SNAKE_CASE_ )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(SCREAMING_SNAKE_CASE_ ,size=(size["""height"""], size["""width"""]) ,data_format=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
return rescale(SCREAMING_SNAKE_CASE_ ,scale=SCREAMING_SNAKE_CASE_ ,data_format=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ):
'''simple docstring'''
return flip_channel_order(SCREAMING_SNAKE_CASE_ ,data_format=SCREAMING_SNAKE_CASE_ )
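    # flip_channel_order swaps RGB <-> BGR; for a channels-first numpy array it
    # is equivalent to reversing the channel axis (illustrative):
    #   img = np.arange(12).reshape(3, 2, 2)   # (C, H, W)
    #   bgr = img[::-1, :, :]                  # same pixels, channels reversed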
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = ChannelDimension.FIRST ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
snake_case : List[Any] = do_resize if do_resize is not None else self.do_resize
snake_case : List[str] = resample if resample is not None else self.resample
snake_case : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
snake_case : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case : str = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case : Union[str, Any] = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
snake_case : Tuple = size if size is not None else self.size
snake_case : Optional[Any] = get_size_dict(SCREAMING_SNAKE_CASE_ ,default_to_square=SCREAMING_SNAKE_CASE_ )
snake_case : str = crop_size if crop_size is not None else self.crop_size
snake_case : Optional[Any] = get_size_dict(SCREAMING_SNAKE_CASE_ ,param_name="""crop_size""" )
snake_case : List[Any] = make_list_of_images(SCREAMING_SNAKE_CASE_ )
if not valid_images(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
# All transformations expect numpy arrays.
snake_case : Dict = [to_numpy_array(SCREAMING_SNAKE_CASE_ ) for image in images]
if do_resize:
snake_case : Union[str, Any] = [self.resize(image=SCREAMING_SNAKE_CASE_ ,size=SCREAMING_SNAKE_CASE_ ,resample=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_center_crop:
snake_case : Optional[Any] = [self.center_crop(image=SCREAMING_SNAKE_CASE_ ,size=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_rescale:
snake_case : Dict = [self.rescale(image=SCREAMING_SNAKE_CASE_ ,scale=SCREAMING_SNAKE_CASE_ ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
snake_case : Optional[int] = [self.flip_channel_order(image=SCREAMING_SNAKE_CASE_ ) for image in images]
snake_case : List[Any] = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) for image in images]
snake_case : int = {"""pixel_values""": images}
return BatchFeature(data=SCREAMING_SNAKE_CASE_ ,tensor_type=SCREAMING_SNAKE_CASE_ )
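    # Net effect of preprocess with the defaults above: resize so the shortest
    # edge is 224, center-crop to 256x256 (padding smaller images, per the
    # transformers center_crop behavior), rescale to [0, 1], flip RGB->BGR for
    # the pretrained checkpoints, and stack channels-first, so one PIL image
    # becomes pixel_values of shape (1, 3, 256, 256).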
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ):
'''simple docstring'''
snake_case : Dict = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(SCREAMING_SNAKE_CASE_ ) != len(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(SCREAMING_SNAKE_CASE_ ):
snake_case : int = target_sizes.numpy()
snake_case : Optional[Any] = []
for idx in range(len(SCREAMING_SNAKE_CASE_ ) ):
snake_case : Optional[int] = torch.nn.functional.interpolate(
                logits[idx].unsqueeze(dim=0 ) ,size=target_sizes[idx] ,mode="""bilinear""" ,align_corners=False )
snake_case : Optional[int] = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(SCREAMING_SNAKE_CASE_ )
else:
snake_case : Tuple = logits.argmax(dim=1 )
snake_case : Dict = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
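    # The per-image resize-then-argmax step above, in isolation (shapes are
    # illustrative):
    #   logits = torch.randn(1, 21, 32, 32)            # (1, num_classes, H, W)
    #   up = torch.nn.functional.interpolate(
    #       logits, size=(128, 128), mode="bilinear", align_corners=False)
    #   seg_map = up[0].argmax(dim=0)                  # (128, 128) class-id map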
| 36 | 0 |
"""simple docstring"""
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = '''T5Config'''
class UpperCamelCase_ (__A ):
__magic_name__ = '''mt5'''
__magic_name__ = MTaConfig
class UpperCamelCase_ (__A ):
__magic_name__ = '''mt5'''
__magic_name__ = MTaConfig
class UpperCamelCase_ (__A ):
__magic_name__ = '''mt5'''
__magic_name__ = MTaConfig
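# These three shells add no forward logic: they re-register the TF T5
# implementations under the "mt5" model_type with MT5's config. In the
# upstream API they correspond to TFMT5Model, TFMT5ForConditionalGeneration
# and TFMT5EncoderModel, e.g. loadable via .from_pretrained("google/mt5-small").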
| 95 |
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    # Build a randomly initialized seq2seq model from a pretrained config and
    # save it, together with the matching tokenizer, to save_dir.
    config = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(config)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model


if __name__ == "__main__":
    fire.Fire(save_randomly_initialized_version)
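# fire maps the function's positional parameters onto the command line;
# example use (config name and output path are placeholders):
#   python save_randomly_initialized_model.py t5-small /tmp/t5-small-random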
| 36 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__lowerCamelCase = {
'configuration_efficientformer': [
'EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EfficientFormerConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ['EfficientFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
'EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'EfficientFormerForImageClassification',
'EfficientFormerForImageClassificationWithTeacher',
'EfficientFormerModel',
'EfficientFormerPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
'TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFEfficientFormerForImageClassification',
'TFEfficientFormerForImageClassificationWithTeacher',
'TFEfficientFormerModel',
'TFEfficientFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 96 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowercase : Any = logging.get_logger(__name__)
__lowercase : str = {
'''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : Dict = '''mobilenet_v1'''
def __init__( self ,SCREAMING_SNAKE_CASE_=3 ,SCREAMING_SNAKE_CASE_=224 ,SCREAMING_SNAKE_CASE_=1.0 ,SCREAMING_SNAKE_CASE_=8 ,SCREAMING_SNAKE_CASE_="relu6" ,SCREAMING_SNAKE_CASE_=True ,SCREAMING_SNAKE_CASE_=0.9_99 ,SCREAMING_SNAKE_CASE_=0.02 ,SCREAMING_SNAKE_CASE_=0.0_01 ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE_ )
if depth_multiplier <= 0:
raise ValueError("""depth_multiplier must be greater than zero.""" )
snake_case : List[Any] = num_channels
snake_case : str = image_size
snake_case : List[Any] = depth_multiplier
snake_case : Optional[int] = min_depth
snake_case : Union[str, Any] = hidden_act
snake_case : int = tf_padding
snake_case : Optional[int] = classifier_dropout_prob
snake_case : Tuple = initializer_range
snake_case : List[str] = layer_norm_eps
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : Optional[Any] = version.parse('''1.11''' )
@property
def snake_case_ ( self ):
'''simple docstring'''
return OrderedDict([("""pixel_values""", {0: """batch"""})] )
@property
def snake_case_ ( self ):
'''simple docstring'''
if self.task == "image-classification":
return OrderedDict([("""logits""", {0: """batch"""})] )
else:
return OrderedDict([("""last_hidden_state""", {0: """batch"""}), ("""pooler_output""", {0: """batch"""})] )
@property
def snake_case_ ( self ):
'''simple docstring'''
return 1E-4
| 36 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowercase__( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
a :List[str] = AltDiffusionPipeline
a :List[Any] = TEXT_TO_IMAGE_PARAMS
a :Any = TEXT_TO_IMAGE_BATCH_PARAMS
a :Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
a :Optional[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
def _lowercase ( self : Optional[Any] ) -> List[Any]:
torch.manual_seed(0 )
lowercase_ = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , )
lowercase_ = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , clip_sample=SCREAMING_SNAKE_CASE_ , set_alpha_to_one=SCREAMING_SNAKE_CASE_ , )
torch.manual_seed(0 )
lowercase_ = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
lowercase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , projection_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_0_0_2 , )
lowercase_ = CLIPTextModel(SCREAMING_SNAKE_CASE_ )
lowercase_ = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
lowercase_ = 7_7
lowercase_ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def _lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str]=0 ) -> Any:
if str(SCREAMING_SNAKE_CASE_ ).startswith('''mps''' ):
lowercase_ = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
else:
lowercase_ = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
lowercase_ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def _lowercase ( self : Tuple ) -> str:
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def _lowercase ( self : Dict ) -> str:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def _lowercase ( self : List[Any] ) -> Any:
lowercase_ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase_ = self.get_dummy_components()
torch.manual_seed(0 )
lowercase_ = RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_0_0_2 , )
# TODO: remove after fixing the non-deterministic text encoder
lowercase_ = RobertaSeriesModelWithTransformation(SCREAMING_SNAKE_CASE_ )
lowercase_ = text_encoder
lowercase_ = AltDiffusionPipeline(**SCREAMING_SNAKE_CASE_ )
lowercase_ = alt_pipe.to(SCREAMING_SNAKE_CASE_ )
alt_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
lowercase_ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
lowercase_ = '''A photo of an astronaut'''
lowercase_ = alt_pipe(**SCREAMING_SNAKE_CASE_ )
lowercase_ = output.images
lowercase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
lowercase_ = np.array(
[0.5_74_81_62, 0.60_44_71_45, 0.48_82_12_17, 0.50_10_06_36, 0.5_43_11_85, 0.45_76_36_83, 0.49_65_76_96, 0.48_13_27_33, 0.47_57_30_93] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _lowercase ( self : Union[str, Any] ) -> int:
lowercase_ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase_ = self.get_dummy_components()
lowercase_ = PNDMScheduler(skip_prk_steps=SCREAMING_SNAKE_CASE_ )
torch.manual_seed(0 )
lowercase_ = RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_0_0_2 , )
# TODO: remove after fixing the non-deterministic text encoder
lowercase_ = RobertaSeriesModelWithTransformation(SCREAMING_SNAKE_CASE_ )
lowercase_ = text_encoder
lowercase_ = AltDiffusionPipeline(**SCREAMING_SNAKE_CASE_ )
lowercase_ = alt_pipe.to(SCREAMING_SNAKE_CASE_ )
alt_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
lowercase_ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
lowercase_ = alt_pipe(**SCREAMING_SNAKE_CASE_ )
lowercase_ = output.images
lowercase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
lowercase_ = np.array(
[0.51_60_50_93, 0.5_70_72_41, 0.47_36_55_07, 0.50_57_88_86, 0.5_63_38_77, 0.4_64_25_03, 0.5_18_20_81, 0.48_76_34_84, 0.49_08_42_37] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class lowercase__( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : Dict ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self : List[str] ) -> Dict:
# make sure here that pndm scheduler skips prk
lowercase_ = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , safety_checker=SCREAMING_SNAKE_CASE_ )
lowercase_ = alt_pipe.to(SCREAMING_SNAKE_CASE_ )
alt_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
lowercase_ = '''A painting of a squirrel eating a burger'''
lowercase_ = torch.manual_seed(0 )
lowercase_ = alt_pipe([prompt] , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=6.0 , num_inference_steps=2_0 , output_type='''np''' )
lowercase_ = output.images
lowercase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowercase_ = np.array([0.10_10, 0.08_00, 0.07_94, 0.08_85, 0.08_43, 0.07_62, 0.07_69, 0.07_29, 0.05_86] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _lowercase ( self : Any ) -> Optional[int]:
lowercase_ = DDIMScheduler.from_pretrained('''BAAI/AltDiffusion''' , subfolder='''scheduler''' )
lowercase_ = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , scheduler=SCREAMING_SNAKE_CASE_ , safety_checker=SCREAMING_SNAKE_CASE_ )
lowercase_ = alt_pipe.to(SCREAMING_SNAKE_CASE_ )
alt_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
lowercase_ = '''A painting of a squirrel eating a burger'''
lowercase_ = torch.manual_seed(0 )
lowercase_ = alt_pipe([prompt] , generator=SCREAMING_SNAKE_CASE_ , num_inference_steps=2 , output_type='''numpy''' )
lowercase_ = output.images
lowercase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowercase_ = np.array([0.40_19, 0.40_52, 0.38_10, 0.41_19, 0.39_16, 0.39_82, 0.46_51, 0.41_95, 0.53_23] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
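        # Both slow tests above pin only a 3x3 corner slice of the generated
        # image with a 1e-2 tolerance: tight enough to catch scheduler
        # regressions, loose enough to absorb GPU nondeterminism across runs.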
| 97 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase : List[str] = logging.get_logger(__name__)
__lowercase : List[str] = {
'''edbeeching/decision-transformer-gym-hopper-medium''': (
'''https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'''
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : int = '''decision_transformer'''
__lowerCamelCase : Optional[Any] = ['''past_key_values''']
__lowerCamelCase : Tuple = {
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self ,SCREAMING_SNAKE_CASE_=17 ,SCREAMING_SNAKE_CASE_=4 ,SCREAMING_SNAKE_CASE_=128 ,SCREAMING_SNAKE_CASE_=4096 ,SCREAMING_SNAKE_CASE_=True ,SCREAMING_SNAKE_CASE_=1 ,SCREAMING_SNAKE_CASE_=1024 ,SCREAMING_SNAKE_CASE_=3 ,SCREAMING_SNAKE_CASE_=1 ,SCREAMING_SNAKE_CASE_=None ,SCREAMING_SNAKE_CASE_="relu" ,SCREAMING_SNAKE_CASE_=0.1 ,SCREAMING_SNAKE_CASE_=0.1 ,SCREAMING_SNAKE_CASE_=0.1 ,SCREAMING_SNAKE_CASE_=1E-5 ,SCREAMING_SNAKE_CASE_=0.02 ,SCREAMING_SNAKE_CASE_=True ,SCREAMING_SNAKE_CASE_=True ,SCREAMING_SNAKE_CASE_=50256 ,SCREAMING_SNAKE_CASE_=50256 ,SCREAMING_SNAKE_CASE_=False ,SCREAMING_SNAKE_CASE_=False ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
snake_case : Any = state_dim
snake_case : Optional[Any] = act_dim
snake_case : Union[str, Any] = hidden_size
snake_case : Any = max_ep_len
snake_case : int = action_tanh
snake_case : Any = vocab_size
snake_case : Any = n_positions
snake_case : List[str] = n_layer
snake_case : int = n_head
snake_case : Optional[int] = n_inner
snake_case : List[Any] = activation_function
snake_case : Tuple = resid_pdrop
snake_case : Optional[Any] = embd_pdrop
snake_case : Dict = attn_pdrop
snake_case : List[str] = layer_norm_epsilon
snake_case : Union[str, Any] = initializer_range
snake_case : Optional[Any] = scale_attn_weights
snake_case : str = use_cache
snake_case : int = scale_attn_by_inverse_layer_idx
snake_case : Tuple = reorder_and_upcast_attn
snake_case : Tuple = bos_token_id
snake_case : List[str] = eos_token_id
super().__init__(bos_token_id=SCREAMING_SNAKE_CASE_ ,eos_token_id=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
| 36 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import Generic, TypeVar
lowercase__ : Any = TypeVar('T')
class __lowerCAmelCase ( Generic[T] ):
"""simple docstring"""
def __init__( self : int , lowerCAmelCase__ : T ) -> None:
'''simple docstring'''
_UpperCamelCase = data
_UpperCamelCase = self
_UpperCamelCase = 0
class __lowerCAmelCase ( Generic[T] ):
"""simple docstring"""
def __init__( self : Optional[Any] ) -> None:
'''simple docstring'''
_UpperCamelCase = {}
def snake_case__ ( self : List[str] , lowerCAmelCase__ : T ) -> None:
'''simple docstring'''
_UpperCamelCase = DisjointSetTreeNode(lowerCAmelCase__ )
def snake_case__ ( self : Dict , lowerCAmelCase__ : T ) -> DisjointSetTreeNode[T]:
'''simple docstring'''
_UpperCamelCase = self.map[data]
if elem_ref != elem_ref.parent:
_UpperCamelCase = self.find_set(elem_ref.parent.data )
return elem_ref.parent
def snake_case__ ( self : Optional[Any] , lowerCAmelCase__ : DisjointSetTreeNode[T] , lowerCAmelCase__ : DisjointSetTreeNode[T] ) -> None:
'''simple docstring'''
if nodea.rank > nodea.rank:
_UpperCamelCase = nodea
else:
_UpperCamelCase = nodea
if nodea.rank == nodea.rank:
nodea.rank += 1
def snake_case__ ( self : Any , lowerCAmelCase__ : T , lowerCAmelCase__ : T ) -> None:
'''simple docstring'''
self.link(self.find_set(lowerCAmelCase__ ) , self.find_set(lowerCAmelCase__ ) )
class __lowerCAmelCase ( Generic[T] ):
"""simple docstring"""
def __init__( self : Dict ) -> None:
'''simple docstring'''
_UpperCamelCase = {}
def snake_case__ ( self : Optional[Any] , lowerCAmelCase__ : T ) -> None:
'''simple docstring'''
if node not in self.connections:
_UpperCamelCase = {}
def snake_case__ ( self : Tuple , lowerCAmelCase__ : T , lowerCAmelCase__ : T , lowerCAmelCase__ : int ) -> None:
'''simple docstring'''
self.add_node(lowerCAmelCase__ )
self.add_node(lowerCAmelCase__ )
_UpperCamelCase = weight
_UpperCamelCase = weight
def snake_case__ ( self : Any ) -> GraphUndirectedWeighted[T]:
'''simple docstring'''
_UpperCamelCase = []
_UpperCamelCase = set()
for start in self.connections:
for end in self.connections[start]:
if (start, end) not in seen:
seen.add((end, start) )
edges.append((start, end, self.connections[start][end]) )
        edges.sort(key=lambda x : x[2] )
# creating the disjoint set
_UpperCamelCase = DisjointSetTree[T]()
for node in self.connections:
disjoint_set.make_set(lowerCAmelCase__ )
# MST generation
_UpperCamelCase = 0
_UpperCamelCase = 0
_UpperCamelCase = GraphUndirectedWeighted[T]()
while num_edges < len(self.connections ) - 1:
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = edges[index]
index += 1
_UpperCamelCase = disjoint_set.find_set(lowerCAmelCase__ )
_UpperCamelCase = disjoint_set.find_set(lowerCAmelCase__ )
if parent_u != parent_v:
num_edges += 1
graph.add_edge(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
disjoint_set.union(lowerCAmelCase__ , lowerCAmelCase__ )
return graph
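# Usage sketch for the structures above, using the method names read inside
# the class bodies (add_edge is read at the call sites; `kruskal` is an
# assumed name for the unnamed MST method); node labels are arbitrary:
#   g = GraphUndirectedWeighted[int]()
#   g.add_edge(1, 2, 1); g.add_edge(2, 3, 2); g.add_edge(1, 3, 10)
#   mst = g.kruskal()   # keeps (1, 2) and (2, 3); the weight-10 edge closes a cycle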
| 98 |
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 36 | 0 |
def solution(n=4_000_000):
    # Project Euler #2: sum the even-valued Fibonacci terms not exceeding n.
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total


if __name__ == "__main__":
    print(f'''{solution() = }''')
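# Worked check on a small bound: the even Fibonacci terms not exceeding 100
# are 2, 8 and 34, so solution(100) == 44.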
| 99 |
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def lowercase ( __A : Dict , __A : Union[str, Any] , __A : List[str] ) -> Any:
'''simple docstring'''
snake_case : Tuple = {
"""en""": """Machine learning is great, isn't it?""",
"""ru""": """Машинное обучение - это здорово, не так ли?""",
"""de""": """Maschinelles Lernen ist großartig, oder?""",
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
snake_case : Optional[Any] = {
"""ru-en""": ["""[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)""", """39.20"""],
"""en-ru""": ["""[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)""", """33.47"""],
"""en-de""": ["""[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)""", """42.83"""],
"""de-en""": ["""[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)""", """41.35"""],
}
snake_case : Optional[int] = f"""{src_lang}-{tgt_lang}"""
snake_case : Any = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR's WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
"""
os.makedirs(__A , exist_ok=__A )
snake_case : Union[str, Any] = os.path.join(__A , """README.md""" )
print(f"""Generating {path}""" )
with open(__A , """w""" , encoding="""utf-8""" ) as f:
f.write(__A )
# make sure we are under the root of the project
__lowercase : int = Path(__file__).resolve().parent.parent.parent
__lowercase : List[str] = repo_dir / '''model_cards'''
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
__lowercase , __lowercase , __lowercase : List[str] = model_name.split('''-''')
__lowercase : str = model_cards_dir / '''facebook''' / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 36 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class __snake_case :
'''simple docstring'''
def __init__( self , A_ , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = parent
SCREAMING_SNAKE_CASE__ = 13
SCREAMING_SNAKE_CASE__ = 7
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = 2
SCREAMING_SNAKE_CASE__ = 99
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 32
SCREAMING_SNAKE_CASE__ = 2
SCREAMING_SNAKE_CASE__ = 4
SCREAMING_SNAKE_CASE__ = 0.1
SCREAMING_SNAKE_CASE__ = 0.1
SCREAMING_SNAKE_CASE__ = 5_12
SCREAMING_SNAKE_CASE__ = 16
SCREAMING_SNAKE_CASE__ = 2
SCREAMING_SNAKE_CASE__ = 0.02
SCREAMING_SNAKE_CASE__ = 3
SCREAMING_SNAKE_CASE__ = 4
SCREAMING_SNAKE_CASE__ = '''last'''
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = 0
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
SCREAMING_SNAKE_CASE__ = None
if self.use_input_lengths:
SCREAMING_SNAKE_CASE__ = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
SCREAMING_SNAKE_CASE__ = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE__ = FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
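        # ids_tensor and random_attention_mask come from the shared TF test
        # utilities: ids_tensor(shape, vocab_size) draws random ids in
        # [0, vocab_size), and random_attention_mask returns a random 0/1 mask
        # that keeps at least one position attended, so every generated batch
        # is a valid model input.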
def lowercase_ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = TFFlaubertModel(config=A_ )
SCREAMING_SNAKE_CASE__ = {'''input_ids''': input_ids, '''lengths''': input_lengths, '''langs''': token_type_ids}
SCREAMING_SNAKE_CASE__ = model(A_ )
SCREAMING_SNAKE_CASE__ = [input_ids, input_mask]
SCREAMING_SNAKE_CASE__ = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = TFFlaubertWithLMHeadModel(A_ )
SCREAMING_SNAKE_CASE__ = {'''input_ids''': input_ids, '''lengths''': input_lengths, '''langs''': token_type_ids}
SCREAMING_SNAKE_CASE__ = model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase_ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = TFFlaubertForQuestionAnsweringSimple(A_ )
SCREAMING_SNAKE_CASE__ = {'''input_ids''': input_ids, '''lengths''': input_lengths}
SCREAMING_SNAKE_CASE__ = model(A_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase_ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = TFFlaubertForSequenceClassification(A_ )
SCREAMING_SNAKE_CASE__ = {'''input_ids''': input_ids, '''lengths''': input_lengths}
SCREAMING_SNAKE_CASE__ = model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowercase_ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = self.num_labels
SCREAMING_SNAKE_CASE__ = TFFlaubertForTokenClassification(config=A_ )
SCREAMING_SNAKE_CASE__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
SCREAMING_SNAKE_CASE__ = model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase_ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = self.num_choices
SCREAMING_SNAKE_CASE__ = TFFlaubertForMultipleChoice(config=A_ )
SCREAMING_SNAKE_CASE__ = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE__ = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE__ = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE__ = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
SCREAMING_SNAKE_CASE__ = model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
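        # Note: Flaubert/XLM-style models take language ids via "langs"; these
        # tests simply reuse the token type ids tensor for that purpose.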
        inputs_dict = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''langs''': token_type_ids,
'''lengths''': input_lengths,
}
return config, inputs_dict
@require_tf
class TFFlaubertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFlaubertModel,
            TFFlaubertWithLMHeadModel,
            TFFlaubertForSequenceClassification,
            TFFlaubertForQuestionAnsweringSimple,
            TFFlaubertForTokenClassification,
            TFFlaubertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    all_generative_model_classes = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": TFFlaubertModel,
            "fill-mask": TFFlaubertWithLMHeadModel,
            "question-answering": TFFlaubertForQuestionAnsweringSimple,
            "text-classification": TFFlaubertForSequenceClassification,
            "token-classification": TFFlaubertForTokenClassification,
            "zero-shot": TFFlaubertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False
    def setUp(self):
        self.model_tester = TFFlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")

        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]],
            dtype=tf.int32,
        )  # "J'aime flaubert !"

        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ],
            dtype=tf.float32,
        )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 100 |
INSTALL_CONTENT = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
notebook_first_cells = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
black_avoid_patterns = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 36 | 0 |
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class PriorTransformerTests(ModelTesterMixin, unittest.TestCase):
    model_class = PriorTransformer
    main_input_name = "hidden_states"

    @property
    def dummy_input(self):
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(torch_device)
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
    def get_dummy_seed_input(self, seed=0):
        torch.manual_seed(seed)
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
    @property
    def input_shape(self):
        return (4, 8)

    @property
    def output_shape(self):
        return (4, 8)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
'num_attention_heads': 2,
'attention_head_dim': 4,
'num_layers': 2,
'embedding_dim': 8,
'num_embeddings': 7,
'additional_embeddings': 4,
}
        inputs_dict = self.dummy_input
return init_dict, inputs_dict
    def test_from_pretrained_hub(self):
        model, loading_info = PriorTransformer.from_pretrained(
            "hf-internal-testing/prior-dummy", output_loading_info=True
        )
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        hidden_states = model(**self.dummy_input)[0]
assert hidden_states is not None, "Make sure output is not None"
    def test_forward_signature(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()

        model = self.model_class(**init_dict)
        signature = inspect.signature(model.forward)
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]

        expected_arg_names = ["hidden_states", "timestep"]
        self.assertListEqual(arg_names[:2], expected_arg_names)
    def test_output_pretrained(self):
        model = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy")
        model = model.to(torch_device)

        if hasattr(model, "set_default_attn_processor"):
            model.set_default_attn_processor()

        input = self.get_dummy_seed_input()

        with torch.no_grad():
            output = model(**input)[0]

        output_slice = output[0, :5].flatten().cpu()
        print(output_slice)

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        expected_output_slice = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239])
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class PriorTransformerIntegrationTests(unittest.TestCase):
    def get_dummy_seed_input(self, batch_size=1, embedding_dim=768, num_embeddings=77, seed=0):
        torch.manual_seed(seed)
        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
            [37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
            # fmt: on
        ]
    )
    def test_kandinsky(self, seed, expected_slice):
        model = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior")
        model.to(torch_device)
        input = self.get_dummy_seed_input(seed=seed)

        with torch.no_grad():
            sample = model(**input)[0]

        assert list(sample.shape) == [1, 768]

        output_slice = sample[0, :8].flatten().cpu()
        print(output_slice)
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
| 101 |
import warnings
from ..trainer import Trainer
from ..utils import logging
__lowercase : str = logging.get_logger(__name__)
class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
| 36 | 0 |
"""simple docstring"""
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
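# For reference, the expansion above is a plain cartesian product. A minimal,
# self-contained sketch of the same idea (mirrors what main() below does):
#
#   import itertools
#   dims = [["--tf32 0", "--tf32 1"], ["", "--fp16", "--bf16"]]
#   variations = [" ".join(dim).strip() for dim in itertools.product(*dims)]
#   # -> ['--tf32 0', '--tf32 0 --fp16', '--tf32 0 --bf16',
#   #     '--tf32 1', '--tf32 1 --fp16', '--tf32 1 --bf16']
#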
# In this particular case we don't know what the default tf32 setting is as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
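# As a worked example using the sample table above: with '--tf32 0' (285.11) as
# the baseline, the '--tf32 1' row gets diff % = round(100 * (342.09 - 285.11) / 285.11) = 20.
#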
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("nan")
class Tee:
    """
    A helper class to tee print's output into a file. Usage: sys.stdout = Tee(filename)
    """

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))
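
# A quick illustration of Tee (matching write() above): carriage-return progress
# updates stay on the console but are stripped from the file copy.
#
#   sys.stdout = Tee("report.txt")
#   print("epoch 1 done")      # goes to the console and to report.txt
#   sys.stdout.write("50%\r")  # goes to the console only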
def get_original_command(max_width=80, full_python_path=False):
    """Return the original command line string that can be replayed nicely, wrapped for `max_width` chars."""
    cmd = []

    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)

    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)
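
# Illustrative example (hypothetical command): with args.base_cmd set to
#   "run_translation.py --output_dir /tmp/old --model_name_or_path t5-small"
# and output_dir "output_benchmark", get_base_command() returns roughly:
#   [sys.executable, "run_translation.py", "--model_name_or_path", "t5-small",
#    "--output_dir", "output_benchmark", "--overwrite_output_dir"]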
def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # Enable to debug everything but the run itself, to do it fast and see the progress.
    # This is useful for debugging the output formatting quickly - we can remove it later once
    # everybody is happy with the output
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(id, cmd, variation_key, variation, longest_variation_len, target_metric_key, report_metric_keys, repeat_times, output_dir, verbose):
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(results) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f"""
Datetime    : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}

Software:
transformers: {transformers.__version__}
torch       : {torch.__version__}
cuda        : {torch.version.cuda}
python      : {platform.python_version()}

Hardware:
{torch.cuda.device_count()} GPUs      : {properties.name}, {properties.total_memory/2**30:0.2f}GB
"""
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base-cmd", default=None, type=str, required=True, help="Base cmd",
    )
    parser.add_argument(
        "--variations", default=None, type=str, nargs="+", required=True, help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
    )
    parser.add_argument(
        "--base-variation", default=None, type=str, help="Baseline variation to compare to. if None the minimal target value will be used to compare against",
    )
    parser.add_argument(
        "--target-metric-key", default=None, type=str, required=True, help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
    )
    parser.add_argument(
        "--report-metric-keys", default="", type=str, help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples",
    )
    parser.add_argument(
        "--repeat-times", default=1, type=int, help="How many times to re-run each variation - an average will be reported",
    )
    parser.add_argument(
        "--output_dir", default="output_benchmark", type=str, help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked",
    )
    parser.add_argument(
        "--verbose", default=False, action="store_true", help="Whether to show the outputs of each run or just the benchmark progress",
    )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1, cmd, variation_key, variation, longest_variation_len, args.target_metric_key, report_metric_keys, args.repeat_times, output_dir, args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)


if __name__ == "__main__":
    main()
| 102 |
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ["text", "image", "audio"]
def create_inputs(input_types: List[str]):
    inputs = []
for input_type in input_types:
if input_type == "text":
inputs.append("""Text input""" )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png""" ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3000 ) )
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
else:
raise ValueError(f"""Invalid type requested: {input_type}""" )
return inputs
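# For instance (illustrative): create_inputs(["text", "audio"]) returns
# ["Text input", torch.ones(3000)]; nested lists of types are handled recursively.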
def output_types(outputs: List):
    output_types = []

    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")

    return output_types
@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_types_inputs(self):
        inputs = create_inputs(self.tool.inputs)

        _inputs = []

        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*_inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
| 36 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class ConvNextImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, crop_pct: float = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)

        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], crop_pct: float, resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )
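
    # Worked example for the branch above: with shortest_edge=224 and the default
    # crop_pct of 224/256, the shortest side is first resized to
    # int(224 / (224 / 256)) = 256, then the image is center-cropped to 224x224.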
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, crop_pct: float = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
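
# A minimal usage sketch (illustrative only; the 640x480 input is a made-up example):
#
#   from PIL import Image
#   processor = ConvNextImageProcessor(size={"shortest_edge": 224})
#   batch = processor(images=Image.new("RGB", (640, 480)), return_tensors="np")
#   print(batch["pixel_values"].shape)  # -> (1, 3, 224, 224)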
| 103 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
__lowercase : Optional[Any] = pytest.mark.integration
@pytest.mark.parametrize("""path""" , ["""paws""", """csv"""] )
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings("""ignore:inspect_metric is deprecated:FutureWarning""" )
@pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" )
@pytest.mark.parametrize("""path""" , ["""accuracy"""] )
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
"""path, config_name, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
"""path, expected""" , [
("""squad""", """plain_text"""),
("""acronym_identification""", """default"""),
("""lhoestq/squad""", """plain_text"""),
("""lhoestq/test""", """default"""),
("""lhoestq/demo1""", """lhoestq--demo1"""),
("""dalle-mini/wit""", """dalle-mini--wit"""),
] , )
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names
@pytest.mark.parametrize(
"""path, expected_configs, expected_splits_in_first_config""" , [
("""squad""", ["""plain_text"""], ["""train""", """validation"""]),
("""dalle-mini/wit""", ["""dalle-mini--wit"""], ["""train"""]),
("""paws""", ["""labeled_final""", """labeled_swap""", """unlabeled_final"""], ["""train""", """test""", """validation"""]),
] , )
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
"""path, expected_config, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def test_get_dataset_infos(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
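# A matching positive-path sketch (mirrors the parametrized expectations above):
#
#   assert get_dataset_split_names("squad", config_name="plain_text") == ["train", "validation"]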
| 36 | 0 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"""The `inpainting.py` script is outdated. Please use directly `from diffusers import"""
""" StableDiffusionInpaintPipeline` instead."""
)
| 104 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
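
# A rough usage sketch (the actual ONNX export machinery lives elsewhere in transformers):
#
#   onnx_config = AlbertOnnxConfig(AlbertConfig(), task="multiple-choice")
#   assert onnx_config.inputs["input_ids"] == {0: "batch", 1: "choice", 2: "sequence"}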
| 36 | 0 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = ''' def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
'''
class lowerCAmelCase_ ( unittest.TestCase ):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )

    def tearDown(self):
        check_copies.TRANSFORMER_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)

    def test_copy_consistency(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            re.sub("Bert", "TestModel", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
            f"{long_class_name}LMPredictionHead",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            REFERENCE_CODE,
            overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]

        md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'
' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'
' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'
' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'
' Luong, Quoc V. Le, Christopher D. Manning.'
)
        localized_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        converted_md_list_sample = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'
' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'
' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'
' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'
' Christopher D. Manning 发布。\n'
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )

        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_md_list_sample)

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list, localized_readme["format_model_list"]
        )

        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        link_changed_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'
)
        link_unchanged_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'
' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        converted_md_list_sample = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"]
        )

        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
| 105 |
from __future__ import annotations
def average(nums: list) -> float:
    """
    Return the mean of a list of numbers.

    >>> average([3, 6, 9, 12, 15, 18, 21])
    12.0
    >>> average([5, 10, -15, 20, -25])
    -1.0
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 36 | 0 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components
        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, ) )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2, attention_head_dim=12, embedding_dim=embedder_projection_dim, num_layers=1, )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1_000, clip_sample=True, clip_sample_range=5.0, beta_schedule="squaredcos_cap_v2", )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, ) )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="projection", projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True, )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1, )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turtle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle", prior_num_inference_steps=2, num_inference_steps=2, output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)


class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuration of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"
            )

        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})


class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        import torch

        common_inputs = OrderedDict()

        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)

        return common_inputs


class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> None:
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(
        self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default"
    ) -> OnnxConfig:
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
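# Usage sketch (editor's illustration, not part of the original module):
# any vision encoder config can be paired with any text decoder config, e.g.
#
#     from transformers import BertConfig, ViTConfig
#
#     config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(ViTConfig(), BertConfig())
#     assert config.is_encoder_decoder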
| 36 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json",
    },
    "added_tokens.json": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json",
    },
    "merges_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "RUCAIBox/mvp": 1024,
}


class MvpTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MvpTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
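# Usage sketch (editor's illustration, not part of the original module): a single
# sequence is wrapped as <s> ... </s>, and a pair as <s> A </s></s> B </s>, e.g.
#
#     tok = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
#     ids = tok("hello", add_special_tokens=False)["input_ids"]
#     tok.build_inputs_with_special_tokens(ids)  # -> [bos_id, *ids, eos_id]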
| 107 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase : Any = logging.get_logger(__name__)
def lowercase ( __A : Optional[Any] ) -> Dict:
'''simple docstring'''
snake_case : Dict = """huggingface/label-files"""
snake_case : int = """imagenet-1k-id2label.json"""
snake_case : Tuple = json.load(open(hf_hub_download(__A , __A , repo_type="""dataset""" ) , """r""" ) )
snake_case : Any = {int(__A ): v for k, v in idalabel.items()}
snake_case : Dict = {v: k for k, v in idalabel.items()}
snake_case : Any = """std_conv""" if """bit""" in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
snake_case : List[Any] = BitConfig(
conv_layer=__A , num_labels=1000 , idalabel=__A , labelaid=__A , )
return config
def lowercase ( __A : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
if "stem.conv" in name:
snake_case : List[str] = name.replace("""stem.conv""" , """bit.embedder.convolution""" )
if "blocks" in name:
snake_case : List[str] = name.replace("""blocks""" , """layers""" )
if "head.fc" in name:
snake_case : Optional[int] = name.replace("""head.fc""" , """classifier.1""" )
if name.startswith("""norm""" ):
snake_case : Optional[Any] = """bit.""" + name
if "bit" not in name and "classifier" not in name:
snake_case : Tuple = """bit.encoder.""" + name
return name
def lowercase ( ) -> Optional[int]:
'''simple docstring'''
snake_case : int = """http://images.cocodataset.org/val2017/000000039769.jpg"""
snake_case : Optional[Any] = Image.open(requests.get(__A , stream=__A ).raw )
return im
@torch.no_grad()
def lowercase ( __A : Any , __A : Union[str, Any] , __A : str=False ) -> Optional[int]:
'''simple docstring'''
snake_case : str = get_config(__A )
# load original model from timm
snake_case : Tuple = create_model(__A , pretrained=__A )
timm_model.eval()
# load state_dict of original model
snake_case : List[str] = timm_model.state_dict()
for key in state_dict.copy().keys():
snake_case : List[Any] = state_dict.pop(__A )
snake_case : Union[str, Any] = val.squeeze() if """head""" in key else val
# load HuggingFace model
snake_case : List[Any] = BitForImageClassification(__A )
model.eval()
model.load_state_dict(__A )
# create image processor
snake_case : Dict = create_transform(**resolve_data_config({} , model=__A ) )
snake_case : Optional[Any] = transform.transforms
snake_case : List[Any] = {
"""bilinear""": PILImageResampling.BILINEAR,
"""bicubic""": PILImageResampling.BICUBIC,
"""nearest""": PILImageResampling.NEAREST,
}
snake_case : Union[str, Any] = BitImageProcessor(
do_resize=__A , size={"""shortest_edge""": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=__A , crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} , do_normalize=__A , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
snake_case : Dict = prepare_img()
snake_case : List[str] = transform(__A ).unsqueeze(0 )
snake_case : int = processor(__A , return_tensors="""pt""" ).pixel_values
# verify pixel values
assert torch.allclose(__A , __A )
# verify logits
with torch.no_grad():
snake_case : Optional[int] = model(__A )
snake_case : Dict = outputs.logits
print("""Logits:""" , logits[0, :3] )
print("""Predicted class:""" , model.config.idalabel[logits.argmax(-1 ).item()] )
snake_case : int = timm_model(__A )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__A , outputs.logits , atol=1E-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(__A ).mkdir(exist_ok=__A )
print(f"""Saving model {model_name} and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(__A )
processor.save_pretrained(__A )
if push_to_hub:
print(f"""Pushing model {model_name} and processor to the hub""" )
model.push_to_hub(f"""ybelkada/{model_name}""" )
processor.push_to_hub(f"""ybelkada/{model_name}""" )
if __name__ == "__main__":
__lowercase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''resnetv2_50x1_bitm''',
type=str,
help='''Name of the BiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model to the hub.''',
)
__lowercase : Union[str, Any] = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 36 | 0 |
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
import os
import pytest
from attr import dataclass
REGION = "us-east-1"  # defaults region
@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}

    @property
    def metric_definitions(self):
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
            ]

    @property
    def base_job_name(self):
        return f"{self.framework}-transformers-test"

    @property
    def test_path(self):
        return f"./tests/sagemaker/scripts/{self.framework}"

    @property
    def image_uri(self):
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"


@pytest.fixture(scope="class")
def sm_env(request):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
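# Editor's illustration (not in the original file): each metric regex pulls the
# numeric value out of a trainer log line, e.g.
#     re.search(r"train_runtime.*=\D*(.*?)$", "train_runtime = 123.4").group(1) == "123.4"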
| 36 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class StableDiffusionPipelineOutput(BaseOutput):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
    from .pipeline_cycle_diffusion import CycleDiffusionPipeline
    from .pipeline_stable_diffusion import StableDiffusionPipeline
    from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
    from .pipeline_stable_diffusion_img2img import StableDiffusionImg2ImgPipeline
    from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
    from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
    from .pipeline_stable_diffusion_instruct_pix2pix import StableDiffusionInstructPix2PixPipeline
    from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
    from .pipeline_stable_diffusion_ldm3d import StableDiffusionLDM3DPipeline
    from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
    from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
    from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
    from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
    from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
    from .pipeline_stable_unclip import StableUnCLIPPipeline
    from .pipeline_stable_unclip_img2img import StableUnCLIPImg2ImgPipeline
    from .safety_checker import StableDiffusionSafetyChecker
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.26.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionPix2PixZeroPipeline,
    )
else:
    from .pipeline_stable_diffusion_depth2img import StableDiffusionDepth2ImgPipeline
    from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
    from .pipeline_stable_diffusion_pix2pix_zero import StableDiffusionPix2PixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version(">=", "0.0.12")
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
    from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
    from .pipeline_onnx_stable_diffusion_img2img import OnnxStableDiffusionImg2ImgPipeline
    from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
    from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
    from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
    @flax.struct.dataclass
    class FlaxStableDiffusionPipelineOutput(BaseOutput):
        images: np.ndarray
        nsfw_content_detected: List[bool]

    from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
    from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
    from .pipeline_flax_stable_diffusion_img2img import FlaxStableDiffusionImg2ImgPipeline
    from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
| 109 |
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('''To use the rich extension, install rich with `pip install rich`''')
| 36 | 0 |
"""simple docstring"""
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(cache_dir=cache_dir, data_files=path_or_paths, features=features, **kwargs)

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
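# Usage sketch (editor's illustration, not part of the original module):
#
#     ds = TextDatasetReader("my_corpus.txt", split="train").read()
#
# "my_corpus.txt" is a hypothetical path; any newline-delimited text file works,
# yielding one example per line under a "text" column.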
| 110 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
    class HansDataset(Dataset):
        features: List[InputFeatures]

        def __init__(self, data_dir: str, tokenizer: PreTrainedTokenizer, task: str, max_seq_length: Optional[int] = None, overwrite_cache=False, evaluate: bool = False):
            processor = hans_processors[task]()
            cached_features_file = os.path.join(
                data_dir, "cached_{}_{}_{}_{}".format("dev" if evaluate else "train", tokenizer.__class__.__name__, str(max_seq_length), task),
            )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )
                    logger.info("Training examples: %s", len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info("Saving features into cached file %s", cached_features_file)
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
if is_tf_available():
import tensorflow as tf
    class TFHansDataset:
        features: List[InputFeatures]

        def __init__(self, data_dir: str, tokenizer: PreTrainedTokenizer, task: str, max_seq_length: Optional[int] = 128, overwrite_cache=False, evaluate: bool = False):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                    if ex_index % 10000 == 0:
                        logger.info("Writing example %d of %d" % (ex_index, len(examples)))

                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )

            # NOTE: the dtype restoration below (int32 for features, int64 for labels)
            # follows the upstream utils_hans.py; the original digits were lost here.
            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {
                        "example_id": tf.int32,
                        "input_ids": tf.int32,
                        "attention_mask": tf.int32,
                        "token_type_ids": tf.int32,
                    },
                    tf.int64,
                ),
                (
                    {
                        "example_id": tf.TensorShape([]),
                        "input_ids": tf.TensorShape([None, None]),
                        "attention_mask": tf.TensorShape([None, None]),
                        "token_type_ids": tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ),
            )

        def get_dataset(self):
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(
    examples: List[InputExample], label_list: List[str], max_length: int, tokenizer: PreTrainedTokenizer,
):
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))

        inputs = tokenizer(
            example.text_a, example.text_b, add_special_tokens=True, max_length=max_length, padding="max_length", truncation=True, return_overflowing_tokens=True,
        )

        label = label_map[example.label] if example.label in label_map else 0

        pairID = int(example.pairID)

        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features
__lowercase : Dict = {
'''hans''': 3,
}
__lowercase : Union[str, Any] = {
'''hans''': HansProcessor,
}
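# Usage sketch (editor's illustration, not part of the original module):
#
#     processor = hans_processors["hans"]()
#     examples = processor.get_train_examples("/path/to/hans")  # hypothetical data dir
#     features = hans_convert_examples_to_features(examples, processor.get_labels(), 128, tokenizer)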
| 36 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deit"] = [
        "DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DeiTForImageClassification",
        "DeiTForImageClassificationWithTeacher",
        "DeiTForMaskedImageModeling",
        "DeiTModel",
        "DeiTPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deit"] = [
        "TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDeiTForImageClassification",
        "TFDeiTForImageClassificationWithTeacher",
        "TFDeiTForMaskedImageModeling",
        "TFDeiTModel",
        "TFDeiTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 147 |
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """Return the prime factors of ``n`` in ascending order."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
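# Usage sketch (editor's illustration, not part of the original snippet):
assert prime_factors(100) == [2, 2, 5, 5]
assert prime_factors(97) == [97]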
if __name__ == "__main__":
import doctest
doctest.testmod()
| 36 | 0 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class MobileViTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_flip_channel_order: bool = True, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def flip_channel_order(self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None) -> np.ndarray:
        return flip_channel_order(image, data_format=data_format)

    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_flip_channel_order: bool = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
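# Usage sketch (editor's illustration, not part of the original module):
#
#     processor = MobileViTImageProcessor()
#     batch = processor(images=pil_image, return_tensors="pt")  # pil_image is hypothetical
#     batch["pixel_values"].shape  # -> (1, 3, 256, 256) with the default crop size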
| 43 |
import numpy as np
def tangent_hyperbolic(vector: np.array) -> np.array:
    """Implements the tanh activation function, (2 / (1 + e^(-2x))) - 1."""
    return (2 / (1 + np.exp(-2 * vector))) - 1
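# A quick check (editor's illustration): the formula is algebraically equal to np.tanh.
assert np.allclose(tangent_hyperbolic(np.array([0.0, 1.0, -0.5])), np.tanh(np.array([0.0, 1.0, -0.5])))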
if __name__ == "__main__":
import doctest
doctest.testmod()
| 36 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class ScoreSdeVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()

        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False)[
            0
        ]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = "google/ncsnpp-church-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)

        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 533 |
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['''memory_attention''', '''encoder_attn'''],
['''attention''', '''attn'''],
['''/''', '''.'''],
['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
['''.LayerNorm.beta''', '''_layer_norm.bias'''],
['''r.layer_''', '''r.layers.'''],
['''output_proj''', '''out_proj'''],
['''ffn.dense_1.''', '''fc2.'''],
['''ffn.dense.''', '''fc1.'''],
['''ffn_layer_norm''', '''final_layer_norm'''],
['''kernel''', '''weight'''],
['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
['''embeddings.weights''', '''shared.weight'''],
]
def rename_state_dict_key(k):
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
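# Editor's illustration (not in the original script): applied in order, the PATTERNS
# table maps a TF key such as "model/decoder/memory_attention/output_proj/kernel"
# to the PyTorch key "model.decoder.encoder_attn.out_proj.weight".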
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def lowercase ( __A : int="./ckpt/aeslc/model.ckpt-32000" ) -> Dict:
'''simple docstring'''
snake_case : Optional[Any] = tf.train.list_variables(__A )
snake_case : Union[str, Any] = {}
snake_case : List[str] = ["""Adafactor""", """global_step"""]
for name, shape in tqdm(__A , desc="""converting tf checkpoint to dict""" ):
snake_case : str = any(pat in name for pat in ignore_name )
if skip_key:
continue
snake_case : List[str] = tf.train.load_variable(__A , __A )
snake_case : Optional[Any] = array
return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 36 | 0 |
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
class __magic_name__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = '''vision-encoder-decoder'''
SCREAMING_SNAKE_CASE = True
def __init__( self , **__snake_case ) -> Optional[Any]:
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE_ )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
f'A configuraton of type {self.model_type} cannot be instantiated because '
f'not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}' )
__a =kwargs.pop('encoder' )
__a =encoder_config.pop('model_type' )
__a =kwargs.pop('decoder' )
__a =decoder_config.pop('model_type' )
__a =AutoConfig.for_model(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
__a =AutoConfig.for_model(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
__a =True
@classmethod
def __magic_name__ ( cls , __snake_case , __snake_case , **__snake_case ) -> Optional[Any]:
'''simple docstring'''
logger.info('Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config' )
__a =True
__a =True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **SCREAMING_SNAKE_CASE_ )
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
__a =copy.deepcopy(self.__dict__ )
__a =self.encoder.to_dict()
__a =self.decoder.to_dict()
__a =self.__class__.model_type
return output
class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})
class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        import torch

        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        # Zeros are sufficient to trace the cross-attention shape.
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)
        return common_inputs
class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> None:
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(
        self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default"
    ) -> OnnxConfig:
        # The decoder needs the encoder hidden size to build cross-attention dummies.
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
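
# Usage sketch (illustrative addition, not part of the original module). A
# composite config is normally built from two existing sub-configurations via
# `from_encoder_decoder_configs`; the encoder/decoder model types below are
# arbitrary examples:
#
#     from transformers import AutoConfig, VisionEncoderDecoderConfig
#
#     encoder_cfg = AutoConfig.for_model("vit")
#     decoder_cfg = AutoConfig.for_model("gpt2")
#     composite = VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder_cfg, decoder_cfg)
#     assert composite.decoder.is_decoder and composite.decoder.add_cross_attention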
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # Implemented only because PyTorch Lightning requires a forward method.
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
) -> None:
    # load the base longformer model from the model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)
    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)
    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--longformer_model''',
default=None,
type=str,
required=True,
help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
)
parser.add_argument(
'''--longformer_question_answering_ckpt_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch Lightning Checkpoint.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
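
# Usage sketch (illustrative; the checkpoint path is a placeholder, not a real
# file):
#
#     convert_longformer_qa_checkpoint_to_pytorch(
#         "allenai/longformer-base-4096",
#         "./checkpoints/longformer_qa_epoch_2.ckpt",
#         "./longformer-qa-pytorch",
#     )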
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class RagFinetuneExampleTests(TestCasePlus):
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir, exist_ok=True)
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"{split}.{field}"), "w") as f:
                    f.write(content)
    def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, "output")
        data_dir = os.path.join(tmp_dir, "data")
        self._create_dummy_data(data_dir=data_dir)

        testargs = f"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
        """.split()

        if gpus > 0:
            testargs.append(f"--gpus={gpus}")
            if is_apex_available():
                testargs.append("--fp16")
        else:
            testargs.append("--gpus=0")
            testargs.append("--distributed_backend=ddp_cpu")
            testargs.append("--num_processes=2")

        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())

        metrics_save_path = os.path.join(output_dir, "metrics.json")
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result
    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)

OPTS = None


def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()
def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, the only correct answer is the empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores


def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )


def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()


def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}


def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")
def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()


def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh


def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh
def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))


if __name__ == "__main__":
    OPTS = parse_args()
    if OPTS.out_image_dir:
        import matplotlib

        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
    main()
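
# Toy example of the normalization-aware scoring primitives (illustrative):
# articles, punctuation and case are stripped before comparison, so the pair
# below scores a perfect match.
#
#     >>> compute_exact("Eiffel Tower", "The Eiffel Tower.")
#     1
#     >>> compute_f1("Eiffel Tower", "The Eiffel Tower.")
#     1.0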
"""Find the median of the combined contents of two arrays."""
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """Return the median of the merged, sorted contents of the two arrays."""
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
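
# Quick sanity checks of the merge-then-index logic (illustrative):
#
#     >>> median_of_two_arrays([1.0, 3.0], [2.0])
#     2.0
#     >>> median_of_two_arrays([1.0, 2.0], [3.0, 4.0])
#     2.5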
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class MobileViTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def flip_channel_order(
        self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None
    ) -> np.ndarray:
        return flip_channel_order(image, data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
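
# Usage sketch (illustrative; assumes numpy and torch are installed). With the
# defaults above, an arbitrary RGB image is resized, center-cropped to 256x256,
# rescaled and channel-flipped to BGR:
#
#     import numpy as np
#
#     processor = MobileViTImageProcessor()
#     image = (np.random.rand(480, 640, 3) * 255).astype(np.uint8)
#     batch = processor.preprocess(images=image, return_tensors="pt")
#     print(batch["pixel_values"].shape)  # torch.Size([1, 3, 256, 256])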
"""Project Euler problem 8: find the thirteen adjacent digits in the
1000-digit number below that have the greatest product."""
import sys

N = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def str_eval(s: str) -> int:
    """Return the product of the digits in the numeric string `s`."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product


def solution(n: str = N) -> int:
    """Slide a window of thirteen digits over `n` and track the best product."""
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
if __name__ == "__main__":
print(f"""{solution() = }""")
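
# For intuition, the helper simply multiplies digits (illustrative):
#
#     >>> str_eval("9989")
#     5832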
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Save a randomly initialized model (plus tokenizer) built from a pretrained config."""
    config = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(config)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
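
# Hypothetical direct invocation (model name and output path are placeholders):
#
#     save_randomly_initialized_version("t5-small", "./t5-small-random")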
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir,
            keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(cache_dir=cache_dir, data_files=path_or_paths, features=features, **kwargs)

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode,
                verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
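
# Usage sketch (illustrative; the path is a placeholder and relies on the
# `datasets` internals imported above):
#
#     reader = TextDatasetReader("corpus.txt")
#     dataset = reader.read()
#     print(dataset)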
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config(PretrainedConfig):
    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps


class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
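
# Usage sketch (illustrative): instantiate the config and inspect the ONNX
# axes exposed above.
#
#     config = MobileNetV1Config(depth_multiplier=0.75)
#     onnx_config = MobileNetV1OnnxConfig(config)
#     print(onnx_config.inputs)               # OrderedDict([('pixel_values', {0: 'batch'})])
#     print(onnx_config.atol_for_validation)  # 1e-4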
from math import sqrt
def solution(limit: int = 1_000_000) -> int:
    """Project Euler 86: smallest M such that the number of cuboids with an
    integer shortest path and maximum side up to M exceeds `limit`."""
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size


if __name__ == "__main__":
    print(f"{solution() = }")
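
# The integrality test above comes from the shortest surface path of an
# a x b x c cuboid, sqrt((a + b)**2 + c**2); sanity check on the classic
# 6 x 5 x 3 cuboid, whose shortest path has length exactly 10:
#
#     >>> sqrt((5 + 3) ** 2 + 6 ** 2)
#     10.0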
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''edbeeching/decision-transformer-gym-hopper-medium''': (
'''https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'''
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class DecisionTransformerConfig(PretrainedConfig):
    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
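
# Usage sketch (illustrative; dimensions are arbitrary): configure the model
# for a hypothetical environment with an 11-dim state and 3-dim action space.
#
#     config = DecisionTransformerConfig(state_dim=11, act_dim=3, max_ep_len=1000)
#     print(config.hidden_size, config.n_layer)  # 128 3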
"""Lazy import structure for the DPT model."""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dpt"] = [
'''DPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DPTForDepthEstimation''',
'''DPTForSemanticSegmentation''',
'''DPTModel''',
'''DPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
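
# Effect of the lazy module (illustrative): importing a name triggers the real
# submodule import only on first access, so pulling in `DPTConfig` stays cheap
# while `DPTModel` loads the torch-backed modeling code.
#
#     from transformers.models.dpt import DPTConfig  # config only
#     from transformers.models.dpt import DPTModel   # resolves modeling_dpt lazily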
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
"""Tests for the Flax BERT models."""
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxBertModel,
            FlaxBertForPreTraining,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForQuestionAnswering,
            FlaxBertForNextSentencePrediction,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
            FlaxBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        # Only checks that loading a public checkpoint and running a forward pass works.
        model = FlaxBertModel.from_pretrained("bert-base-cased")
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
"""en""": """Machine learning is great, isn't it?""",
"""ru""": """Машинное обучение - это здорово, не так ли?""",
"""de""": """Maschinelles Lernen ist großartig, oder?""",
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
    scores = {
"""ru-en""": ["""[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)""", """39.20"""],
"""en-ru""": ["""[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)""", """33.47"""],
"""en-de""": ["""[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)""", """42.83"""],
"""de-en""": ["""[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)""", """41.35"""],
}
    pair = f"{src_lang}-{tgt_lang}"
    readme = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn't support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR's WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
"""
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
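
# Hypothetical single-pair invocation (the directory is a placeholder):
#
#     write_model_card(Path("./model_cards/facebook/wmt19-en-de"), src_lang="en", tgt_lang="de")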
import tempfile
import unittest
from transformers import T5Config, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import AutoTokenizer, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering, UMT5Model
class UMT5ModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        encoder_seq_length=7,
        decoder_seq_length=9,
        is_training=True,
        use_attention_mask=True,
        use_labels=False,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        d_ff=37,
        relative_attention_num_buckets=8,
        dropout_rate=0.1,
        initializer_factor=0.002,
        eos_token_id=1,
        pad_token_id=0,
        decoder_start_token_id=0,
        scope=None,
        decoder_layers=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers

    def get_large_model_config(self):
        return T5Config.from_pretrained("google/umt5-base")

    def prepare_inputs_dict(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
    ):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device
            )
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def get_pipeline_config(self):
        return T5Config(
            vocab_size=166, d_model=self.hidden_size, d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )

    def get_config(self):
        return T5Config(
            vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )

    def create_and_check_model(
        self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels
    ):
        model = UMT5Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids,
            decoder_input_ids=decoder_input_ids,
            attention_mask=attention_mask,
            decoder_attention_mask=decoder_attention_mask,
        )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state

        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)

    def create_and_check_decoder_model_past(
        self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels
    ):
        model = UMT5Model(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_model_fp16_forward(self, config, input_dict):
        model = UMT5Model(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())
@require_torch
class UMTaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMTaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMTaForConditionalGeneration,
            "feature-extraction": UMTaModel,
            "summarization": UMTaForConditionalGeneration,
            "text2text-generation": UMTaForConditionalGeneration,
            "translation": UMTaForConditionalGeneration,
            "question-answering": UMTaForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]
    def setUp(self):
        """simple docstring"""
        self.model_tester = UMTaModelTester(self)
@unittest.skip("Test has a segmentation fault on torch 1.8.0" )
def lowercase ( self ) -> int:
"""simple docstring"""
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
_UpperCamelCase = UMTaModel(config_and_inputs[0] ).to(SCREAMING_SNAKE_CASE_ )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
SCREAMING_SNAKE_CASE_ , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f'''{tmpdirname}/t5_test.onnx''' , export_params=SCREAMING_SNAKE_CASE_ , opset_version=9 , input_names=["input_ids", "decoder_input_ids"] , )
@unittest.skipIf(torch_device == "cpu" , "Cant do half precision" )
def lowercase ( self ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*SCREAMING_SNAKE_CASE_ )
    def test_generate_with_head_masking(self):
        """simple docstring"""
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMTaForConditionalGeneration(config).eval()
        model.to(torch_device)
        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }
        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device)
            out = model.generate(
                config_and_inputs[1]["input_ids"], num_beams=1, max_length=3, output_attentions=True, return_dict_in_generate=True, **head_masks, )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)
@unittest.skip("Does not work on the tiny model as we keep hitting edge cases." )
def lowercase ( self ) -> Union[str, Any]:
"""simple docstring"""
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class UMTaModelIntegrationTests(unittest.TestCase):
@slow
@unittest.skip(
"Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged" )
    def test_small_integration_test(self):
        """simple docstring"""
        model = UMTaForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False)
        input_text = [
"""Bonjour monsieur <extra_id_0> bien <extra_id_1>.""",
"""No se como puedo <extra_id_0>.""",
"""This is the reason why we <extra_id_0> them.""",
"""The <extra_id_0> walks in <extra_id_1>, seats""",
"""A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.""",
]
_UpperCamelCase = tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors="pt" , padding=SCREAMING_SNAKE_CASE_ ).input_ids
# fmt: off
        EXPECTED_IDS = torch.tensor(
[
[ 3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55,2_74, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33,6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96,2_74, 1],
] )
# fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)
        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
"""<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>""",
"""<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
| 147 |
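# Hypothetical usage sketch, not part of the test file above: it exercises the
# cached-generation path that create_and_check_decoder_model_past verifies.
# Assumes `transformers` (with UMT5 support) and `torch` are installed; the
# "google/umt5-small" checkpoint is real, but downloading it is illustrative only.
import torch
from transformers import AutoTokenizer, UMT5ForConditionalGeneration

tokenizer = AutoTokenizer.from_pretrained("google/umt5-small")
model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small").eval()
inputs = tokenizer("A <extra_id_0> walks into a bar.", return_tensors="pt")
with torch.no_grad():
    # use_cache=True reuses past key/values instead of recomputing them per step
    generated = model.generate(**inputs, max_new_tokens=8, use_cache=True)
print(tokenizer.batch_decode(generated))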
INSTALL_CONTENT = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
notebook_first_cells = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
black_avoid_patterns = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 36 | 0 |
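# Illustrative sketch of how a placeholder map like the one above is consumed;
# the `render` helper below is hypothetical, not an API from the snippet.
def render(source: str, patterns: dict) -> str:
    # Substitute every "{placeholder}" occurrence with its replacement string.
    for placeholder, replacement in patterns.items():
        source = source.replace(placeholder, replacement)
    return source

print(render("from transformers import {model_class}", {"{model_class}": "FakeModelClass"}))
# -> from transformers import FakeModelClass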
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
        """simple docstring"""
        super().setUp()
        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

    def get_tokenizer(self, **kwargs):
        """simple docstring"""
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        """simple docstring"""
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text
    @unittest.skip("MGP-STR always lower cases letters.")
    def test_added_tokens_do_lower_case(self):
        """simple docstring"""
        pass

    def test_add_special_tokens(self):
        """simple docstring"""
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}'):
                special_token = "[SPECIAL_TOKEN]"
                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)
                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)
    def test_internal_consistency(self):
        """simple docstring"""
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}'):
                input_text, output_text = self.get_input_output_texts(tokenizer)
                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_a = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_a)
                tokens_a = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_a), 0)
                text_a = tokenizer.decode(ids)
                self.assertIsInstance(text_a, str)
                self.assertEqual(text_a.replace(" ", ""), output_text)

    @unittest.skip("MGP-STR tokenizer only handles one sequence.")
    def test_maximum_encoding_length_pair_input(self):
        """simple docstring"""
        pass

    @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer")
    def test_pretokenized_inputs(self):
        """simple docstring"""
        pass
| 43 |
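# Self-contained illustrative sketch of the character-level encode/decode round
# trip the tests above exercise, reduced to a plain dict vocabulary and
# bypassing MgpstrTokenizer entirely.
vocab = {ch: i for i, ch in enumerate("abcdefghijklmnopqrstuvwxyz")}
inv_vocab = {i: ch for ch, i in vocab.items()}
ids = [vocab[ch] for ch in "tester"]
assert "".join(inv_vocab[i] for i in ids) == "tester"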
import warnings
from ..trainer import Trainer
from ..utils import logging
__lowercase : str = logging.get_logger(__name__)
class SageMakerTrainer(Trainer):
    '''simple docstring'''

    def __init__(self, args=None, **kwargs):
        '''simple docstring'''
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
| 36 | 0 |
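# Illustrative sketch of the deprecation-shim pattern used above, with
# self-contained stand-in classes; `NewAPI`/`OldAPI` are hypothetical names.
import warnings

class NewAPI:
    def __init__(self, value=None):
        self.value = value

class OldAPI(NewAPI):
    """Deprecated alias: warn once on construction, then delegate."""
    def __init__(self, *args, **kwargs):
        warnings.warn("`OldAPI` is deprecated; use `NewAPI` instead.", FutureWarning)
        super().__init__(*args, **kwargs)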
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[int] = {
'''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''',
'''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''',
}
class MarkupLMConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = 'markuplm'

    def __init__(self, vocab_size=3_0522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=0, eos_token_id=2, max_xpath_tag_unit_embeddings=256, max_xpath_subs_unit_embeddings=1024, tag_pad_id=216, subs_pad_id=1001, xpath_unit_hidden_size=32, max_depth=50, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs, ):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs, )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
# additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
| 533 |
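# Hypothetical construction sketch for the config above; recent `transformers`
# releases expose MarkupLMConfig under this name (import assumed available).
from transformers import MarkupLMConfig

# Override only markup-specific fields; everything else keeps the defaults
# declared in the class above.
config = MarkupLMConfig(max_depth=50, xpath_unit_hidden_size=32)
print(config.model_type)  # "markuplm"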
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ["text", "image", "audio"]
def create_inputs(input_types: List[str]):
    '''simple docstring'''
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512)))
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")
    return inputs


def output_types(outputs: List):
    '''simple docstring'''
    output_types = []
    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")
    return output_types
@is_tool_test
class ToolTesterMixin:
    '''simple docstring'''

    def test_inputs_outputs(self):
        '''simple docstring'''
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))
        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)
        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        '''simple docstring'''
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)
        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]
        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        '''simple docstring'''
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        '''simple docstring'''
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)
        if not isinstance(outputs, list):
            outputs = [outputs]
        self.assertEqual(len(outputs), len(self.tool.outputs))
        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_types_inputs(self):
        '''simple docstring'''
        inputs = create_inputs(self.tool.inputs)
        _inputs = []
        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))
        # Should not raise an error
        outputs = self.tool(*_inputs)
        if not isinstance(outputs, list):
            outputs = [outputs]
        self.assertEqual(len(outputs), len(self.tool.outputs))
| 36 | 0 |
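# Illustrative sketch of the type-dispatch idea behind output_types() above,
# reduced to built-in Python types; the branch mapping here is hypothetical and
# only mirrors the shape of the real dispatch.
def classify(output):
    if isinstance(output, str):
        return "text"
    if isinstance(output, (bytes, bytearray)):
        return "image"  # stand-in branch for illustration only
    raise ValueError(f"Invalid output: {output}")

assert [classify(o) for o in ("hello", b"\x89PNG")] == ["text", "image"]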
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def recursive_print(name, val, spaces=0):
    """simple docstring"""
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)
    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ':', val.size())
    else:
        print(msg, ':', val)


def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    """simple docstring"""
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
def convert_megatron_checkpoint(args, input_state_dict, config):
    """simple docstring"""
    # The converted output model.
    output_state_dict = {}
    # old versions did not store training args
    ds_args = input_state_dict.get('args', None)
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        # from pprint import pprint
        # pprint(vars(ds_args))
        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size
        # pprint(config)
    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0
    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]
    # The word embeddings.
    word_embeddings = embeddings["word_embeddings"]["weight"]
    # Truncate the embedding table to vocab_size rows.
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict["transformer.wte.weight"] = word_embeddings
    # The position embeddings.
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0)
    if n_positions != config.n_positions:
        raise ValueError(
            f'pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match' )
    # Store the position embeddings.
    output_state_dict["transformer.wpe.weight"] = pos_embeddings
    # The transformer.
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]
    # The regex to extract layer names.
    layer_re = re.compile(r'layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)')
    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        "attention.dense": ".attn.c_proj.",
        "self_attention.dense": ".attn.c_proj.",
        "mlp.dense_h_to_4h": ".mlp.c_fc.",
        "mlp.dense_4h_to_h": ".mlp.c_proj.",
    }
    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key)
        # Stop if that's not a layer
        if m is None:
            break
        # The index of the layer.
        layer_idx = int(m.group(1))
        # The name of the operation.
        op_name = m.group(2)
        # Is it a weight or a bias?
        weight_or_bias = m.group(3)
        # The name of the layer.
        layer_name = f'transformer.h.{layer_idx}'
        # For layernorm(s), simply store the layer norm.
        if op_name.endswith('layernorm'):
            ln_name = "ln_1" if op_name.startswith('input') else "ln_2"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val
        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view(
                1, 1, n_positions, n_positions)
            output_state_dict[layer_name + ".attn.bias"] = causal_mask
            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4, dtype=torch.float16)
            output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0, 1).contiguous()
            # Store.
            output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val
        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Store. No change of shape.
            output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val
        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1)
        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "bias"] = val
    # DEBUG.
    assert config.n_layer == layer_idx + 1
    # The final layernorm.
    output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]
    # For LM head, transformers' wants the matrix to weight embeddings.
    output_state_dict["lm_head.weight"] = word_embeddings
    # It should be done!
    return output_state_dict
def main():
    """simple docstring"""
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument('--print-checkpoint-structure', action='store_true')
    parser.add_argument(
        'path_to_checkpoint', type=str, help='Path to the checkpoint file (.zip archive or direct .pt file)', )
    parser.add_argument(
        '--config_file', default='', type=str, help='An optional config json file describing the pre-trained model.', )
    args = parser.parse_args()
    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint)
    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f'Extracting PyTorch state dictionary from {args.path_to_checkpoint}')
    if args.path_to_checkpoint.endswith('.zip'):
        with zipfile.ZipFile(args.path_to_checkpoint, 'r') as checkpoint:
            with checkpoint.open('release/mp_rank_00/model_optim_rng.pt') as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location='cpu')
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location='cpu')
    ds_args = input_state_dict.get('args', None)
    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = "gelu_fast"
            elif ds_args.openai_gelu:
                activation_function = "gelu_new"
            else:
                activation_function = "gelu"
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = "gelu_new"
        # Spell out all parameters in case the defaults change.
        config = GPTaConfig(
            vocab_size=50257, n_positions=1024, n_embd=1024, n_layer=24, n_head=16, n_inner=4096, activation_function=activation_function, resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, summary_type='cls_index', summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, scale_attn_weights=True, use_cache=True, bos_token_id=50256, eos_token_id=50256, )
    else:
        config = GPTaConfig.from_json_file(args.config_file)
    config.architectures = ["GPT2LMHeadModel"]
    # Convert.
    print('Converting')
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)
    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)
    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f'Unrecognized tokenizer_type {tokenizer_type}')
    else:
        tokenizer_model_name = "gpt2"
    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class
    # Store the config to file.
    print('Saving config')
    config.save_pretrained(basename)
    # Save tokenizer based on args
    print(f'Adding {tokenizer_class} tokenizer files')
    tokenizer.save_pretrained(basename)
    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, 'pytorch_model.bin')
    print(f'Saving checkpoint to "{output_checkpoint_file}"')
    torch.save(output_state_dict, output_checkpoint_file)
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 242 |
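# Quick shape check for fix_query_key_value_ordering as defined above. For
# checkpoint_version >= 2.0 the weight keeps its shape; only the row grouping
# changes from (heads, splits, head_dim) to (splits, heads, head_dim). The
# dimensions below are arbitrary illustration values.
import torch

num_heads, head_dim, num_splits, hidden = 4, 8, 3, 16
w = torch.randn(num_splits * num_heads * head_dim, hidden)
out = fix_query_key_value_ordering(w, 2.0, num_splits, num_heads, head_dim)
assert out.shape == w.shape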
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
__lowercase : Optional[Any] = pytest.mark.integration
@pytest.mark.parametrize("""path""" , ["""paws""", """csv"""] )
def test_inspect_dataset(path, tmp_path):
    '''simple docstring'''
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings("""ignore:inspect_metric is deprecated:FutureWarning""" )
@pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" )
@pytest.mark.parametrize("""path""" , ["""accuracy"""] )
def test_inspect_metric(path, tmp_path):
    '''simple docstring'''
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
"""path, config_name, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def test_get_dataset_config_info(path, config_name, expected_splits):
    '''simple docstring'''
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    '''simple docstring'''
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
"""path, expected""" , [
("""squad""", """plain_text"""),
("""acronym_identification""", """default"""),
("""lhoestq/squad""", """plain_text"""),
("""lhoestq/test""", """default"""),
("""lhoestq/demo1""", """lhoestq--demo1"""),
("""dalle-mini/wit""", """dalle-mini--wit"""),
] , )
def test_get_dataset_config_names(path, expected):
    '''simple docstring'''
    config_names = get_dataset_config_names(path)
    assert expected in config_names
@pytest.mark.parametrize(
"""path, expected_configs, expected_splits_in_first_config""" , [
("""squad""", ["""plain_text"""], ["""train""", """validation"""]),
("""dalle-mini/wit""", ["""dalle-mini--wit"""], ["""train"""]),
("""paws""", ["""labeled_final""", """labeled_swap""", """unlabeled_final"""], ["""train""", """test""", """validation"""]),
] , )
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    '''simple docstring'''
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
"""path, expected_config, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def test_get_dataset_split_names(path, expected_config, expected_splits):
    '''simple docstring'''
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    '''simple docstring'''
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
| 36 | 0 |
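# Illustrative interactive use of the same `datasets` inspection API the
# parametrized tests above cover (network access to the Hub is assumed).
from datasets import get_dataset_config_names, get_dataset_split_names

print(get_dataset_config_names("squad"))               # expect "plain_text" among the configs
print(get_dataset_split_names("squad", "plain_text"))  # expect ["train", "validation"]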
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, encoder_stride=2, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        '''simple docstring'''
        return DeiTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, )

    def create_and_check_model(self, config, pixel_values, labels):
        '''simple docstring'''
        model = DeiTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        '''simple docstring'''
        model = DeiTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))
        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        '''simple docstring'''
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DeiTModel,
            "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        '''simple docstring'''
        self.model_tester = DeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()
@unittest.skip(reason="DeiT does not use inputs_embeds" )
def _lowercase ( self ) -> Dict:
'''simple docstring'''
pass
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
a__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ : str =model_class(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
a__ : List[Any] =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE_ , nn.Linear ) )
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
a__ : List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ : str =model_class(SCREAMING_SNAKE_CASE_ )
a__ : Optional[int] =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__ : Any =[*signature.parameters.keys()]
a__ : Optional[int] =["""pixel_values"""]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
    def test_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict
    def test_training(self):
        '''simple docstring'''
        if not self.model_tester.is_training:
            return
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        config.use_cache = False
        config.return_dict = True
        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_problem_types(self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]
        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            for problem_type in problem_types:
                with self.subTest(msg=f'''Testing {model_class} with {problem_type["title"]}'''):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]
                    model = model_class(config)
                    model.to(torch_device)
                    model.train()
                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])
                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])
                    # This tests that we do not trigger the warning form PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something in wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f'''Something is going wrong in the regression problem: intercepted {w.message}''')
                    loss.backward()
    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        '''simple docstring'''
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head(self):
        '''simple docstring'''
        model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224").to(
            torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1_0_0_0))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-1.02_66, 0.19_12, -1.28_61]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        '''simple docstring'''
        model = DeiTModel.from_pretrained(
            "facebook/deit-base-distilled-patch16-224", torch_dtype=torch.float16, device_map="auto")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)
        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            outputs = model(pixel_values)
| 563 |
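# Condensed, hypothetical version of the slow integration test above; assumes
# the "facebook/deit-base-distilled-patch16-224" checkpoint is reachable and
# that the COCO fixture image exists at the path used by the test suite.
import torch
from PIL import Image
from transformers import DeiTForImageClassificationWithTeacher, DeiTImageProcessor

processor = DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224").eval()
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
with torch.no_grad():
    logits = model(**processor(images=image, return_tensors="pt")).logits
print(logits.shape)  # torch.Size([1, 1000])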
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class AlbertConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = 'albert'

    def __init__(self, vocab_size=30000, embedding_size=128, hidden_size=4096, num_hidden_layers=12, num_hidden_groups=1, num_attention_heads=64, intermediate_size=16384, inner_group_num=1, hidden_act="gelu_new", hidden_dropout_prob=0, attention_probs_dropout_prob=0, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1E-12, classifier_dropout_prob=0.1, position_embedding_type="absolute", pad_token_id=0, bos_token_id=2, eos_token_id=3, **kwargs, ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class AlbertOnnxConfig(OnnxConfig):
    '''simple docstring'''

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
| 36 | 0 |
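# Illustrative sketch: inspecting the ONNX axes declared above. The import
# path for AlbertOnnxConfig is an assumption based on the usual transformers
# module layout; adjust it if your installed version differs.
from transformers import AlbertConfig
from transformers.models.albert.configuration_albert import AlbertOnnxConfig

onnx_config = AlbertOnnxConfig(AlbertConfig(), task="default")
print(onnx_config.inputs)  # OrderedDict with batch/sequence dynamic axes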
'''simple docstring'''
def is_arithmetic_series(series: list):
    """simple docstring"""
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list):
    """simple docstring"""
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)
if __name__ == "__main__":
import doctest
doctest.testmod() | 44 |
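# Quick checks for the two helpers above, using the names they are defined
# under here: a constant-difference list passes, a broken one fails, and the
# mean of [2, 4, 6] is 4.0.
assert is_arithmetic_series([2, 4, 6]) is True
assert is_arithmetic_series([2, 4, 7]) is False
assert arithmetic_mean([2, 4, 6]) == 4.0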
from __future__ import annotations
def mean(nums: list) -> float:
    '''simple docstring'''
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 36 | 0 |
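# Quick check for mean() above: 18 / 3 == 6.0.
assert mean([3, 6, 9]) == 6.0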
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'''asapp/sew-tiny-100k''': '''https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json''',
# See all SEW models at https://huggingface.co/models?filter=sew
}
class SEWConfig(PretrainedConfig):
    model_type = 'sew'

    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, squeeze_factor=2, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2), conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), conv_kernel=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1), conv_bias=False, num_conv_pos_embeddings=1_2_8, num_conv_pos_embedding_groups=1_6, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=1_0, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=1_0, mask_feature_min_masks=0, ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=2_5_6, pad_token_id=0, bos_token_id=1, eos_token_id=2, **kwargs, ):
        '''simple docstring'''
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect."
"It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
f"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        '''simple docstring'''
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 620 |
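# Illustrative check of the inputs_to_logits_ratio property above: the overall
# feature-extractor downsampling factor is the product of the conv strides,
# which for the default stride tuple is 320.
import functools
import operator

strides = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
assert functools.reduce(operator.mul, strides, 1) == 320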
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)
class VisionEncoderDecoderConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = 'vision-encoder-decoder'
    is_composition = True

    def __init__(self, **kwargs):
        '''simple docstring'''
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"""A configuraton of type {self.model_type} cannot be instantiated because """
                f"""not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}""")
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")
        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config, decoder_config, **kwargs):
        '''simple docstring'''
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    '''simple docstring'''

    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ])

    @property
    def atol_for_validation(self) -> float:
        '''simple docstring'''
        return 1E-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})
class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    '''simple docstring'''

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None, ):
        '''simple docstring'''
        import torch

        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)
        return common_inputs
class _A ( snake_case ):
'''simple docstring'''
@property
def snake_case_ ( self ):
'''simple docstring'''
pass
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return VisionEncoderDecoderEncoderOnnxConfig(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = "default" ):
'''simple docstring'''
snake_case : int = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
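
# A minimal sketch of assembling such a composite config with the public
# transformers API; the checkpoint names are illustrative.
from transformers import AutoConfig, VisionEncoderDecoderConfig

encoder_cfg = AutoConfig.from_pretrained("google/vit-base-patch16-224-in21k")
decoder_cfg = AutoConfig.from_pretrained("gpt2")
config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder_cfg, decoder_cfg)
assert config.decoder.is_decoder and config.decoder.add_cross_attention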
| 36 | 0 |
def mean_absolute_deviation(nums):
    if not nums:  # Makes sure that the list is not empty
        raise ValueError('''List is empty''')
    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
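
# For example, [1, 2, 3, 4] has mean 2.5 and absolute deviations
# 1.5, 0.5, 0.5, 1.5, whose mean is 1.0.
assert mean_absolute_deviation([1, 2, 3, 4]) == 1.0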
| 590 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase : Any = logging.get_logger(__name__)
def lowercase ( __A : Optional[Any] ) -> Dict:
'''simple docstring'''
snake_case : Dict = """huggingface/label-files"""
snake_case : int = """imagenet-1k-id2label.json"""
snake_case : Tuple = json.load(open(hf_hub_download(__A , __A , repo_type="""dataset""" ) , """r""" ) )
snake_case : Any = {int(__A ): v for k, v in idalabel.items()}
snake_case : Dict = {v: k for k, v in idalabel.items()}
snake_case : Any = """std_conv""" if """bit""" in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
snake_case : List[Any] = BitConfig(
conv_layer=__A , num_labels=1000 , idalabel=__A , labelaid=__A , )
return config
def lowercase ( __A : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
if "stem.conv" in name:
snake_case : List[str] = name.replace("""stem.conv""" , """bit.embedder.convolution""" )
if "blocks" in name:
snake_case : List[str] = name.replace("""blocks""" , """layers""" )
if "head.fc" in name:
snake_case : Optional[int] = name.replace("""head.fc""" , """classifier.1""" )
if name.startswith("""norm""" ):
snake_case : Optional[Any] = """bit.""" + name
if "bit" not in name and "classifier" not in name:
snake_case : Tuple = """bit.encoder.""" + name
return name
def lowercase ( ) -> Optional[int]:
'''simple docstring'''
snake_case : int = """http://images.cocodataset.org/val2017/000000039769.jpg"""
snake_case : Optional[Any] = Image.open(requests.get(__A , stream=__A ).raw )
return im
@torch.no_grad()
def lowercase ( __A : Any , __A : Union[str, Any] , __A : str=False ) -> Optional[int]:
'''simple docstring'''
snake_case : str = get_config(__A )
# load original model from timm
snake_case : Tuple = create_model(__A , pretrained=__A )
timm_model.eval()
# load state_dict of original model
snake_case : List[str] = timm_model.state_dict()
for key in state_dict.copy().keys():
snake_case : List[Any] = state_dict.pop(__A )
snake_case : Union[str, Any] = val.squeeze() if """head""" in key else val
# load HuggingFace model
snake_case : List[Any] = BitForImageClassification(__A )
model.eval()
model.load_state_dict(__A )
# create image processor
snake_case : Dict = create_transform(**resolve_data_config({} , model=__A ) )
snake_case : Optional[Any] = transform.transforms
snake_case : List[Any] = {
"""bilinear""": PILImageResampling.BILINEAR,
"""bicubic""": PILImageResampling.BICUBIC,
"""nearest""": PILImageResampling.NEAREST,
}
snake_case : Union[str, Any] = BitImageProcessor(
do_resize=__A , size={"""shortest_edge""": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=__A , crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} , do_normalize=__A , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
snake_case : Dict = prepare_img()
snake_case : List[str] = transform(__A ).unsqueeze(0 )
snake_case : int = processor(__A , return_tensors="""pt""" ).pixel_values
# verify pixel values
assert torch.allclose(__A , __A )
# verify logits
with torch.no_grad():
snake_case : Optional[int] = model(__A )
snake_case : Dict = outputs.logits
print("""Logits:""" , logits[0, :3] )
print("""Predicted class:""" , model.config.idalabel[logits.argmax(-1 ).item()] )
snake_case : int = timm_model(__A )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__A , outputs.logits , atol=1E-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(__A ).mkdir(exist_ok=__A )
print(f"""Saving model {model_name} and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(__A )
processor.save_pretrained(__A )
if push_to_hub:
print(f"""Pushing model {model_name} and processor to the hub""" )
model.push_to_hub(f"""ybelkada/{model_name}""" )
processor.push_to_hub(f"""ybelkada/{model_name}""" )
if __name__ == "__main__":
__lowercase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''resnetv2_50x1_bitm''',
type=str,
help='''Name of the BiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model to the hub.''',
)
__lowercase : Union[str, Any] = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
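
# Assuming the script above was run with --pytorch_dump_folder_path ./bit-50,
# the converted checkpoint can be reloaded like this (the path is illustrative):
from transformers import BitForImageClassification, BitImageProcessor

model = BitForImageClassification.from_pretrained("./bit-50")
processor = BitImageProcessor.from_pretrained("./bit-50")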
| 36 | 0 |
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
__a : Any = logging.getLogger()
def snake_case_ ( ) -> Optional[int]:
    parser = argparse.ArgumentParser()
    parser.add_argument("-f" )
    args = parser.parse_args()
    return args.f
class UpperCAmelCase( snake_case_ ):
"""simple docstring"""
def __a ( self ) -> int:
"""simple docstring"""
lowercase__ : Optional[Any] = logging.StreamHandler(sys.stdout )
logger.addHandler(SCREAMING_SNAKE_CASE_ )
def __a ( self , lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
        n_gpu = get_gpu_count()
        if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
        else:
            lowerCamelCase.insert(0 , "run_glue_deebert.py" )
            with patch.object(sys , "argv" , lowerCamelCase ):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value , 0.6_66 )
@slow
@require_torch_non_multi_gpu
def __a ( self ) -> Tuple:
"""simple docstring"""
lowercase__ : Union[str, Any] = """
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
""".split()
self.run_and_check(SCREAMING_SNAKE_CASE_ )
lowercase__ : Any = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
self.run_and_check(SCREAMING_SNAKE_CASE_ )
lowercase__ : Optional[int] = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
        self.run_and_check(SCREAMING_SNAKE_CASE_ )
| 397 |
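
# The third DeeBERT run above sweeps --early_exit_entropy. A minimal sketch of the
# idea (pure illustration, not the project's actual API): exit at a layer once the
# prediction entropy of its classifier head falls below the threshold.
import torch

def should_exit_early(logits: torch.Tensor, threshold: float) -> bool:
    log_probs = torch.log_softmax(logits, dim=-1)
    entropy = -(log_probs.exp() * log_probs).sum(dim=-1)
    return bool((entropy < threshold).all())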
import os
import pytest
from attr import dataclass
__lowercase : Optional[int] = '''us-east-1''' # defaults region
@dataclass
class _A :
'''simple docstring'''
__lowerCamelCase : str
__lowerCamelCase : Dict = '''arn:aws:iam::558105141721:role/sagemaker_execution_role'''
__lowerCamelCase : Optional[Any] = {
'''task_name''': '''mnli''',
'''per_device_train_batch_size''': 1_6,
'''per_device_eval_batch_size''': 1_6,
'''do_train''': True,
'''do_eval''': True,
'''do_predict''': True,
'''output_dir''': '''/opt/ml/model''',
'''overwrite_output_dir''': True,
'''max_steps''': 5_0_0,
'''save_steps''': 5_5_0_0,
}
__lowerCamelCase : List[str] = {**hyperparameters, '''max_steps''': 1_0_0_0}
@property
def snake_case_ ( self ):
'''simple docstring'''
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def snake_case_ ( self ):
'''simple docstring'''
return F"""{self.framework}-transfromers-test"""
@property
def snake_case_ ( self ):
'''simple docstring'''
return F"""./tests/sagemaker/scripts/{self.framework}"""
@property
def snake_case_ ( self ):
'''simple docstring'''
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="""class""" )
def lowercase ( request ) -> List[str]:
    '''simple docstring'''
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework )
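
# A minimal sketch of how these values typically feed a SageMaker estimator.
# Everything below (entry point, instance type) is illustrative and assumes the
# SageMaker Python SDK is installed.
from sagemaker.huggingface import HuggingFace

estimator = HuggingFace(
    entry_point="run_glue.py",
    role="arn:aws:iam::558105141721:role/sagemaker_execution_role",
    image_uri="763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04",
    instance_count=1,
    instance_type="ml.p3.2xlarge",
    hyperparameters={"task_name": "mnli", "max_steps": 500},
)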
| 36 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'''configuration_vit_mae''': ['''VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMAEConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_vit_mae'''] = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_vit_mae'''] = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
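
# A minimal sketch of the deferred-import pattern _LazyModule implements:
# attribute access triggers the real submodule import. Names are illustrative.
import importlib
import types

class LazySketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(attr)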
| 541 |
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('''To use the rich extension, install rich with `pip install rich`''')
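
# A minimal sketch of the availability check this relies on, assuming it is
# implemented with importlib-based detection:
import importlib.util

def is_rich_available_sketch() -> bool:
    return importlib.util.find_spec("rich") is not None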
| 36 | 0 |
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ : Optional[int] = logging.get_logger(__name__)
set_seed(770)
lowerCAmelCase_ : Dict = {
'''c_attn''': '''att_proj''',
'''c_proj''': '''out_proj''',
'''c_fc''': '''in_proj''',
'''transformer.''': '''''',
'''h.''': '''layers.''',
'''ln_1''': '''layernorm_1''',
'''ln_2''': '''layernorm_2''',
'''ln_f''': '''layernorm_final''',
'''wpe''': '''position_embeds_layer''',
'''wte''': '''input_embeds_layer''',
}
lowerCAmelCase_ : int = {
'''text_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text.pt''',
},
'''coarse_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse.pt''',
},
'''fine_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine.pt''',
},
'''text''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text_2.pt''',
},
'''coarse''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse_2.pt''',
},
'''fine''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine_2.pt''',
},
}
lowerCAmelCase_ : List[str] = os.path.dirname(os.path.abspath(__file__))
lowerCAmelCase_ : List[Any] = os.path.join(os.path.expanduser('~'), '.cache')
lowerCAmelCase_ : Any = os.path.join(os.getenv('XDG_CACHE_HOME', default_cache_dir), 'suno', 'bark_v0')
def _SCREAMING_SNAKE_CASE ( UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int=False ):
"""simple docstring"""
a_ : Optional[Any] = model_type
if use_small:
key += "_small"
return os.path.join(__A , REMOTE_MODEL_PATHS[key]["""file_name"""] )
def _SCREAMING_SNAKE_CASE ( UpperCamelCase__ : Any , UpperCamelCase__ : int ):
"""simple docstring"""
os.makedirs(__A , exist_ok=__A )
hf_hub_download(repo_id=__A , filename=__A , local_dir=__A )
def _SCREAMING_SNAKE_CASE ( UpperCamelCase__ : Any , UpperCamelCase__ : Any , UpperCamelCase__ : Dict=False , UpperCamelCase__ : Any="text" ):
"""simple docstring"""
if model_type == "text":
a_ : Dict = BarkSemanticModel
a_ : Optional[Any] = BarkSemanticConfig
a_ : Dict = BarkSemanticGenerationConfig
elif model_type == "coarse":
a_ : Optional[Any] = BarkCoarseModel
a_ : Any = BarkCoarseConfig
a_ : List[Any] = BarkCoarseGenerationConfig
elif model_type == "fine":
a_ : Dict = BarkFineModel
a_ : Any = BarkFineConfig
a_ : Any = BarkFineGenerationConfig
else:
raise NotImplementedError()
a_ : Tuple = F"{model_type}_small" if use_small else model_type
a_ : List[Any] = REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(__A ):
logger.info(F"{model_type} model not found, downloading into `{CACHE_DIR}`." )
_download(model_info["""repo_id"""] , model_info["""file_name"""] )
a_ : List[str] = torch.load(__A , map_location=__A )
# this is a hack
a_ : Optional[Any] = checkpoint["""model_args"""]
if "input_vocab_size" not in model_args:
a_ : Tuple = model_args["""vocab_size"""]
a_ : Tuple = model_args["""vocab_size"""]
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
a_ : Any = model_args.pop("""n_head""" )
a_ : int = model_args.pop("""n_embd""" )
a_ : List[Any] = model_args.pop("""n_layer""" )
a_ : List[Any] = ConfigClass(**checkpoint["""model_args"""] )
a_ : Tuple = ModelClass(config=__A )
a_ : str = GenerationConfigClass()
a_ : Optional[int] = model_generation_config
a_ : Optional[int] = checkpoint["""model"""]
# fixup checkpoint
a_ : str = """_orig_mod."""
for k, v in list(state_dict.items() ):
if k.startswith(__A ):
# replace part of the key with corresponding layer name in HF implementation
a_ : Dict = k[len(__A ) :]
for old_layer_name in new_layer_name_dict:
a_ : Optional[Any] = new_k.replace(__A , new_layer_name_dict[old_layer_name] )
a_ : Union[str, Any] = state_dict.pop(__A )
a_ : str = set(state_dict.keys() ) - set(model.state_dict().keys() )
a_ : Optional[int] = {k for k in extra_keys if not k.endswith(""".attn.bias""" )}
a_ : Optional[Any] = set(model.state_dict().keys() ) - set(state_dict.keys() )
a_ : List[str] = {k for k in missing_keys if not k.endswith(""".attn.bias""" )}
if len(__A ) != 0:
raise ValueError(F"extra keys found: {extra_keys}" )
if len(__A ) != 0:
raise ValueError(F"missing keys: {missing_keys}" )
model.load_state_dict(__A , strict=__A )
a_ : Union[str, Any] = model.num_parameters(exclude_embeddings=__A )
a_ : int = checkpoint["""best_val_loss"""].item()
logger.info(F"model loaded: {round(n_params/1E6 , 1 )}M params, {round(__A , 3 )} loss" )
model.eval()
model.to(__A )
del checkpoint, state_dict
return model
def _SCREAMING_SNAKE_CASE ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Dict=False , UpperCamelCase__ : Any="text" ):
"""simple docstring"""
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
a_ : List[str] = """cpu""" # do conversion on cpu
a_ : Dict = _get_ckpt_path(__A , use_small=__A )
a_ : Tuple = _load_model(__A , __A , model_type=__A , use_small=__A )
# load bark initial model
a_ : int = _bark_load_model(__A , """cpu""" , model_type=__A , use_small=__A )
if model_type == "text":
a_ : Union[str, Any] = bark_model["""model"""]
if model.num_parameters(exclude_embeddings=__A ) != bark_model.get_num_params():
raise ValueError("""initial and new models don't have the same number of parameters""" )
# check if same output as the bark model
a_ : str = 5
a_ : Dict = 10
if model_type in ["text", "coarse"]:
a_ : Optional[Any] = torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int )
a_ : Optional[int] = bark_model(__A )[0]
a_ : Optional[Any] = model(__A )
# take last logits
a_ : Optional[int] = output_new_model_total.logits[:, [-1], :]
else:
a_ : int = 3
a_ : Tuple = 8
a_ : int = torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
a_ : List[Any] = model(__A , __A )
a_ : Optional[Any] = bark_model(__A , __A )
a_ : str = output_new_model_total.logits
# output difference should come from the difference of self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError("""initial and new outputs don't have the same shape""" )
if (output_new_model - output_old_model).abs().max().item() > 1E-3:
raise ValueError("""initial and new outputs are not equal""" )
Path(__A ).mkdir(exist_ok=__A )
model.save_pretrained(__A )
def _SCREAMING_SNAKE_CASE ( UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : str , ):
"""simple docstring"""
a_ : Optional[int] = os.path.join(__A , __A )
a_ : List[str] = BarkSemanticConfig.from_pretrained(os.path.join(__A , """config.json""" ) )
a_ : List[Any] = BarkCoarseConfig.from_pretrained(os.path.join(__A , """config.json""" ) )
a_ : str = BarkFineConfig.from_pretrained(os.path.join(__A , """config.json""" ) )
a_ : List[Any] = EncodecConfig.from_pretrained("""facebook/encodec_24khz""" )
a_ : List[str] = BarkSemanticModel.from_pretrained(__A )
a_ : List[Any] = BarkCoarseModel.from_pretrained(__A )
a_ : str = BarkFineModel.from_pretrained(__A )
a_ : Dict = EncodecModel.from_pretrained("""facebook/encodec_24khz""" )
a_ : Tuple = BarkConfig.from_sub_model_configs(
__A , __A , __A , __A )
a_ : Union[str, Any] = BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
a_ : str = BarkModel(__A )
a_ : int = semantic
a_ : Optional[Any] = coarseAcoustic
a_ : Dict = fineAcoustic
a_ : int = codec
a_ : Dict = bark_generation_config
Path(__A ).mkdir(exist_ok=__A )
bark.save_pretrained(__A , repo_id=__A , push_to_hub=__A )
if __name__ == "__main__":
lowerCAmelCase_ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument('model_type', type=str, help='text, coarse or fine.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--is_small', action='store_true', help='convert the small version instead of the large.')
lowerCAmelCase_ : Dict = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
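
# Assuming the file is saved as convert_bark.py, one invocation per sub-model
# (script name and output paths are illustrative):
#   python convert_bark.py text ./bark-text --is_small
#   python convert_bark.py coarse ./bark-coarse --is_small
#   python convert_bark.py fine ./bark-fine --is_small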
| 442 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
__lowercase : Any = logging.getLogger(__name__)
@dataclass(frozen=snake_case )
class _A :
'''simple docstring'''
__lowerCamelCase : str
__lowerCamelCase : str
__lowerCamelCase : Optional[str] = None
__lowerCamelCase : Optional[str] = None
__lowerCamelCase : Optional[str] = None
@dataclass(frozen=snake_case )
class _A :
'''simple docstring'''
__lowerCamelCase : List[int]
__lowerCamelCase : Optional[List[int]] = None
__lowerCamelCase : Optional[List[int]] = None
__lowerCamelCase : Optional[Union[int, float]] = None
__lowerCamelCase : Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : List[InputFeatures]
def __init__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_=False ,SCREAMING_SNAKE_CASE_ = False ,):
'''simple docstring'''
snake_case : str = hans_processors[task]()
snake_case : str = os.path.join(
SCREAMING_SNAKE_CASE_ ,"""cached_{}_{}_{}_{}""".format(
"""dev""" if evaluate else """train""" ,tokenizer.__class__.__name__ ,str(SCREAMING_SNAKE_CASE_ ) ,SCREAMING_SNAKE_CASE_ ,) ,)
snake_case : Dict = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
snake_case , snake_case : List[Any] = label_list[2], label_list[1]
snake_case : List[Any] = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
snake_case : Any = cached_features_file + """.lock"""
with FileLock(SCREAMING_SNAKE_CASE_ ):
if os.path.exists(SCREAMING_SNAKE_CASE_ ) and not overwrite_cache:
logger.info(F"""Loading features from cached file {cached_features_file}""" )
snake_case : int = torch.load(SCREAMING_SNAKE_CASE_ )
else:
logger.info(F"""Creating features from dataset file at {data_dir}""" )
snake_case : Union[str, Any] = (
processor.get_dev_examples(SCREAMING_SNAKE_CASE_ ) if evaluate else processor.get_train_examples(SCREAMING_SNAKE_CASE_ )
)
logger.info("""Training examples: %s""" ,len(SCREAMING_SNAKE_CASE_ ) )
snake_case : Dict = hans_convert_examples_to_features(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
logger.info("""Saving features into cached file %s""" ,SCREAMING_SNAKE_CASE_ )
torch.save(self.features ,SCREAMING_SNAKE_CASE_ )
def __len__( self ):
'''simple docstring'''
return len(self.features )
def __getitem__( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return self.features[i]
def snake_case_ ( self ):
'''simple docstring'''
return self.label_list
if is_tf_available():
import tensorflow as tf
class _A :
'''simple docstring'''
__lowerCamelCase : List[InputFeatures]
def __init__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = 128 ,SCREAMING_SNAKE_CASE_=False ,SCREAMING_SNAKE_CASE_ = False ,):
'''simple docstring'''
snake_case : Any = hans_processors[task]()
snake_case : List[str] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
snake_case , snake_case : int = label_list[2], label_list[1]
snake_case : List[str] = label_list
snake_case : int = processor.get_dev_examples(SCREAMING_SNAKE_CASE_ ) if evaluate else processor.get_train_examples(SCREAMING_SNAKE_CASE_ )
snake_case : Any = hans_convert_examples_to_features(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) ,desc="""convert examples to features""" ):
if ex_index % 10000 == 0:
logger.info("""Writing example %d of %d""" % (ex_index, len(SCREAMING_SNAKE_CASE_ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
snake_case : Any = tf.data.Dataset.from_generator(
SCREAMING_SNAKE_CASE_ ,(
{
"""example_id""": tf.intaa,
"""input_ids""": tf.intaa,
"""attention_mask""": tf.intaa,
"""token_type_ids""": tf.intaa,
},
tf.intaa,
) ,(
{
"""example_id""": tf.TensorShape([] ),
"""input_ids""": tf.TensorShape([None, None] ),
"""attention_mask""": tf.TensorShape([None, None] ),
"""token_type_ids""": tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) ,)
def snake_case_ ( self ):
'''simple docstring'''
return self.dataset
def __len__( self ):
'''simple docstring'''
return len(self.features )
def __getitem__( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return self.features[i]
def snake_case_ ( self ):
'''simple docstring'''
return self.label_list
class _A ( snake_case ):
'''simple docstring'''
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return self._create_examples(self._read_tsv(os.path.join(SCREAMING_SNAKE_CASE_ ,"""heuristics_train_set.txt""" ) ) ,"""train""" )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return self._create_examples(self._read_tsv(os.path.join(SCREAMING_SNAKE_CASE_ ,"""heuristics_evaluation_set.txt""" ) ) ,"""dev""" )
def snake_case_ ( self ):
'''simple docstring'''
return ["contradiction", "entailment", "neutral"]
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : List[str] = []
for i, line in enumerate(SCREAMING_SNAKE_CASE_ ):
if i == 0:
continue
snake_case : Any = """%s-%s""" % (set_type, line[0])
snake_case : Optional[int] = line[5]
snake_case : Union[str, Any] = line[6]
snake_case : Optional[Any] = line[7][2:] if line[7].startswith("""ex""" ) else line[7]
snake_case : Dict = line[0]
examples.append(InputExample(guid=SCREAMING_SNAKE_CASE_ ,text_a=SCREAMING_SNAKE_CASE_ ,text_b=SCREAMING_SNAKE_CASE_ ,label=SCREAMING_SNAKE_CASE_ ,pairID=SCREAMING_SNAKE_CASE_ ) )
return examples
def lowercase ( __A : List[InputExample] , __A : List[str] , __A : int , __A : PreTrainedTokenizer , ) -> Tuple:
'''simple docstring'''
snake_case : List[Any] = {label: i for i, label in enumerate(__A )}
snake_case : Union[str, Any] = []
for ex_index, example in tqdm.tqdm(enumerate(__A ) , desc="""convert examples to features""" ):
if ex_index % 1_0000 == 0:
logger.info("""Writing example %d""" % (ex_index) )
snake_case : Union[str, Any] = tokenizer(
example.text_a , example.text_b , add_special_tokens=__A , max_length=__A , padding="""max_length""" , truncation=__A , return_overflowing_tokens=__A , )
snake_case : Tuple = label_map[example.label] if example.label in label_map else 0
snake_case : Tuple = int(example.pairID )
features.append(InputFeatures(**__A , label=__A , pairID=__A ) )
for i, example in enumerate(examples[:5] ):
logger.info("""*** Example ***""" )
logger.info(f"""guid: {example}""" )
logger.info(f"""features: {features[i]}""" )
return features
__lowercase : Dict = {
'''hans''': 3,
}
__lowercase : Union[str, Any] = {
'''hans''': HansProcessor,
}
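
# A minimal sketch of wiring the pieces above together, assuming the processor
# exposes the usual get_dev_examples/get_labels API; the tokenizer choice and
# data directory are illustrative.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("roberta-base")
processor = hans_processors["hans"]()
examples = processor.get_dev_examples("./hans_data")  # expects heuristics_evaluation_set.txt
features = hans_convert_examples_to_features(examples, processor.get_labels(), 128, tokenizer)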
| 36 | 0 |
from __future__ import annotations
import math
import random
from typing import Any
class MyQueue:
    def __init__(self) -> None:
        self.data: list[Any] = []
        self.head: int = 0
        self.tail: int = 0

    def is_empty(self) -> bool:
        return self.head == self.tail

    def push(self, data: Any) -> None:
        self.data.append(data)
        self.tail = self.tail + 1

    def pop(self) -> Any:
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret

    def count(self) -> int:
        return self.tail - self.head

    def print_queue(self) -> None:
        print(self.data)
        print("**************")
        print(self.data[self.head : self.tail])


class MyNode:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.left: MyNode | None = None
        self.right: MyNode | None = None
        self.height: int = 1

    def get_data(self) -> Any:
        return self.data

    def get_left(self) -> MyNode | None:
        return self.left

    def get_right(self) -> MyNode | None:
        return self.right

    def get_height(self) -> int:
        return self.height

    def set_data(self, data: Any) -> None:
        self.data = data

    def set_left(self, node: MyNode | None) -> None:
        self.left = node

    def set_right(self, node: MyNode | None) -> None:
        self.right = node

    def set_height(self, height: int) -> None:
        self.height = height


def get_height(node: MyNode | None) -> int:
    if node is None:
        return 0
    return node.get_height()


def my_max(a: int, b: int) -> int:
    if a > b:
        return a
    return b


def right_rotation(node: MyNode) -> MyNode:
    print("right rotation node:", node.get_data())
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right())
    ret.set_right(node)
    node.set_height(my_max(get_height(node.get_right()), get_height(node.get_left())) + 1)
    ret.set_height(my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1)
    return ret


def left_rotation(node: MyNode) -> MyNode:
    print("left rotation node:", node.get_data())
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left())
    ret.set_left(node)
    node.set_height(my_max(get_height(node.get_right()), get_height(node.get_left())) + 1)
    ret.set_height(my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1)
    return ret


def lr_rotation(node: MyNode) -> MyNode:
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child))
    return right_rotation(node)


def rl_rotation(node: MyNode) -> MyNode:
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child))
    return left_rotation(node)


def insert_node(node: MyNode | None, data: Any) -> MyNode | None:
    if node is None:
        return MyNode(data)
    if data < node.get_data():
        node.set_left(insert_node(node.get_left(), data))
        if (
            get_height(node.get_left()) - get_height(node.get_right()) == 2
        ):  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node = right_rotation(node)
            else:
                node = lr_rotation(node)
    else:
        node.set_right(insert_node(node.get_right(), data))
        if get_height(node.get_right()) - get_height(node.get_left()) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node)
            else:
                node = left_rotation(node)
    node.set_height(my_max(get_height(node.get_right()), get_height(node.get_left())) + 1)
    return node


def get_right_most(root: MyNode) -> Any:
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()


def get_left_most(root: MyNode) -> Any:
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()


def del_node(root: MyNode, data: Any) -> MyNode | None:
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            temp_data = get_left_most(right_child)
            root.set_data(temp_data)
            root.set_right(del_node(right_child, temp_data))
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print("No such data")
            return root
        else:
            root.set_left(del_node(left_child, data))
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child, data))
    if get_height(right_child) - get_height(left_child) == 2:
        assert right_child is not None
        if get_height(right_child.get_right()) > get_height(right_child.get_left()):
            root = left_rotation(root)
        else:
            root = rl_rotation(root)
    elif get_height(right_child) - get_height(left_child) == -2:
        assert left_child is not None
        if get_height(left_child.get_left()) > get_height(left_child.get_right()):
            root = right_rotation(root)
        else:
            root = lr_rotation(root)
    root.set_height(my_max(get_height(root.get_right()), get_height(root.get_left())) + 1)
    return root


class AVLtree:
    def __init__(self) -> None:
        self.root: MyNode | None = None

    def get_height(self) -> int:
        return get_height(self.root)

    def insert(self, data: Any) -> None:
        print("insert:" + str(data))
        self.root = insert_node(self.root, data)

    def del_node(self, data: Any) -> None:
        print("delete:" + str(data))
        if self.root is None:
            print("Tree is empty!")
            return
        self.root = del_node(self.root, data)

    def __str__(self) -> str:  # a level traversal gives a more intuitive look at the tree
        output = ""
        q = MyQueue()
        q.push(self.root)
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = " " * int(math.pow(2, layer - 1))
            output += space
            if node is None:
                output += "*"
                q.push(None)
                q.push(None)
            else:
                output += str(node.get_data())
                q.push(node.get_left())
                q.push(node.get_right())
            output += space
            cnt = cnt + 1
            for i in range(100):
                if cnt == math.pow(2, i) - 1:
                    layer = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output


def _test() -> None:
    import doctest

    doctest.testmod()


if __name__ == "__main__":
    _test()
    t = AVLtree()
    lst = list(range(10))
    random.shuffle(lst)
    for i in lst:
        t.insert(i)
    print(str(t))
    random.shuffle(lst)
    for i in lst:
        t.del_node(i)
    print(str(t))
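
# The rotations above maintain the AVL invariant: sibling subtree heights differ
# by at most one. A small standalone check over the structure defined above:
def is_balanced(node) -> bool:
    if node is None:
        return True
    gap = abs(get_height(node.get_left()) - get_height(node.get_right()))
    return gap <= 1 and is_balanced(node.get_left()) and is_balanced(node.get_right())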
| 147 |
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    '''Return the prime factorization of n in ascending order.'''
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
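
# For example, 360 = 2 * 2 * 2 * 3 * 3 * 5:
assert prime_factors(360) == [2, 2, 2, 3, 3, 5]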
| 36 | 0 |
class TrieNode:
    def __init__(self) -> None:
        self.nodes = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word) -> None:
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word) -> bool:
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word) -> None:
        def _delete(curr, word, index) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)


def print_words(node, word):
    if node.is_leaf:
        print(word, end=" ")
    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie():
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def print_results(msg, passes):
    print(str(msg), "works!" if passes else "doesn't work :(")


def pytests():
    assert test_trie()


def main():
    print_results("Testing trie functionality", test_trie())


if __name__ == "__main__":
    main()
| 43 |
import numpy as np
def tangent_hyperbolic(vector: np.array) -> np.array:
    '''Elementwise tanh via the expanded form 2 / (1 + e^(-2x)) - 1.'''
    return (2 / (1 + np.exp(-2 * vector))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
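
# The expression 2 / (1 + e^(-2x)) - 1 is algebraically identical to tanh(x);
# for example:
assert np.allclose(tangent_hyperbolic(np.array([-1.0, 0.0, 1.0])), np.tanh([-1.0, 0.0, 1.0]))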
| 36 | 0 |
'''simple docstring'''
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
UpperCAmelCase_ : Optional[Any] = re.compile(r'\b(a|an|the)\b', re.UNICODE)
UpperCAmelCase_ : Optional[int] = None
def snake_case_ ( ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser("""Official evaluation script for SQuAD version 2.0.""" )
parser.add_argument("""data_file""" , metavar="""data.json""" , help="""Input data JSON file.""" )
parser.add_argument("""pred_file""" , metavar="""pred.json""" , help="""Model predictions.""" )
parser.add_argument(
"""--out-file""" , """-o""" , metavar="""eval.json""" , help="""Write accuracy metrics to file (default is stdout).""" )
parser.add_argument(
"""--na-prob-file""" , """-n""" , metavar="""na_prob.json""" , help="""Model estimates of probability of no answer.""" )
parser.add_argument(
"""--na-prob-thresh""" , """-t""" , type=__A , default=1.0 , help="""Predict \"\" if no-answer probability exceeds this (default = 1.0).""" , )
parser.add_argument(
"""--out-image-dir""" , """-p""" , metavar="""out_images""" , default=__A , help="""Save precision-recall curves to directory.""" )
parser.add_argument("""--verbose""" , """-v""" , action="""store_true""" )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Any = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
_SCREAMING_SNAKE_CASE : int = bool(qa["""answers"""]["""text"""] )
return qid_to_has_ans
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def remove_articles(SCREAMING_SNAKE_CASE__ ):
return ARTICLES_REGEX.sub(""" """ , __A )
def white_space_fix(SCREAMING_SNAKE_CASE__ ):
return " ".join(text.split() )
def remove_punc(SCREAMING_SNAKE_CASE__ ):
_SCREAMING_SNAKE_CASE : Optional[Any] = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(SCREAMING_SNAKE_CASE__ ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(__A ) ) ) )
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
if not s:
return []
return normalize_answer(__A ).split()
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
return int(normalize_answer(__A ) == normalize_answer(__A ) )
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Tuple = get_tokens(__A )
_SCREAMING_SNAKE_CASE : str = get_tokens(__A )
_SCREAMING_SNAKE_CASE : Dict = collections.Counter(__A ) & collections.Counter(__A )
_SCREAMING_SNAKE_CASE : Optional[int] = sum(common.values() )
if len(__A ) == 0 or len(__A ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
_SCREAMING_SNAKE_CASE : List[Any] = 1.0 * num_same / len(__A )
_SCREAMING_SNAKE_CASE : int = 1.0 * num_same / len(__A )
_SCREAMING_SNAKE_CASE : Dict = (2 * precision * recall) / (precision + recall)
return fa
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Tuple = {}
_SCREAMING_SNAKE_CASE : Tuple = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
_SCREAMING_SNAKE_CASE : str = qa["""id"""]
_SCREAMING_SNAKE_CASE : Union[str, Any] = [t for t in qa["""answers"""]["""text"""] if normalize_answer(__A )]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
_SCREAMING_SNAKE_CASE : Optional[Any] = [""""""]
if qid not in preds:
print(f"""Missing prediction for {qid}""" )
continue
_SCREAMING_SNAKE_CASE : Dict = preds[qid]
# Take max over all gold answers
_SCREAMING_SNAKE_CASE : Union[str, Any] = max(compute_exact(__A , __A ) for a in gold_answers )
_SCREAMING_SNAKE_CASE : Optional[int] = max(compute_fa(__A , __A ) for a in gold_answers )
return exact_scores, fa_scores
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[int] = {}
for qid, s in scores.items():
_SCREAMING_SNAKE_CASE : Any = na_probs[qid] > na_prob_thresh
if pred_na:
_SCREAMING_SNAKE_CASE : str = float(not qid_to_has_ans[qid] )
else:
_SCREAMING_SNAKE_CASE : List[Any] = s
return new_scores
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None ):
"""simple docstring"""
if not qid_list:
_SCREAMING_SNAKE_CASE : List[str] = len(__A )
return collections.OrderedDict(
[
("""exact""", 1_0_0.0 * sum(exact_scores.values() ) / total),
("""f1""", 1_0_0.0 * sum(fa_scores.values() ) / total),
("""total""", total),
] )
else:
_SCREAMING_SNAKE_CASE : Any = len(__A )
return collections.OrderedDict(
[
("""exact""", 1_0_0.0 * sum(exact_scores[k] for k in qid_list ) / total),
("""f1""", 1_0_0.0 * sum(fa_scores[k] for k in qid_list ) / total),
("""total""", total),
] )
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
for k in new_eval:
_SCREAMING_SNAKE_CASE : str = new_eval[k]
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
plt.step(__A , __A , color="""b""" , alpha=0.2 , where="""post""" )
plt.fill_between(__A , __A , step="""post""" , alpha=0.2 , color="""b""" )
plt.xlabel("""Recall""" )
plt.ylabel("""Precision""" )
plt.xlim([0.0, 1.0_5] )
plt.ylim([0.0, 1.0_5] )
plt.title(__A )
plt.savefig(__A )
plt.clf()
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[int] = sorted(__A , key=lambda SCREAMING_SNAKE_CASE__ : na_probs[k] )
_SCREAMING_SNAKE_CASE : Any = 0.0
_SCREAMING_SNAKE_CASE : str = 1.0
_SCREAMING_SNAKE_CASE : Tuple = 0.0
_SCREAMING_SNAKE_CASE : str = [1.0]
_SCREAMING_SNAKE_CASE : Any = [0.0]
_SCREAMING_SNAKE_CASE : Dict = 0.0
for i, qid in enumerate(__A ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
_SCREAMING_SNAKE_CASE : str = true_pos / float(i + 1 )
_SCREAMING_SNAKE_CASE : List[str] = true_pos / float(__A )
if i == len(__A ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(__A )
recalls.append(__A )
if out_image:
plot_pr_curve(__A , __A , __A , __A )
return {"ap": 1_0_0.0 * avg_prec}
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
if out_image_dir and not os.path.exists(__A ):
os.makedirs(__A )
_SCREAMING_SNAKE_CASE : Tuple = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
_SCREAMING_SNAKE_CASE : str = make_precision_recall_eval(
__A , __A , __A , __A , out_image=os.path.join(__A , """pr_exact.png""" ) , title="""Precision-Recall curve for Exact Match score""" , )
_SCREAMING_SNAKE_CASE : int = make_precision_recall_eval(
__A , __A , __A , __A , out_image=os.path.join(__A , """pr_f1.png""" ) , title="""Precision-Recall curve for F1 score""" , )
_SCREAMING_SNAKE_CASE : Dict = {k: float(__A ) for k, v in qid_to_has_ans.items()}
_SCREAMING_SNAKE_CASE : int = make_precision_recall_eval(
__A , __A , __A , __A , out_image=os.path.join(__A , """pr_oracle.png""" ) , title="""Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)""" , )
merge_eval(__A , __A , """pr_exact""" )
merge_eval(__A , __A , """pr_f1""" )
merge_eval(__A , __A , """pr_oracle""" )
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
if not qid_list:
return
_SCREAMING_SNAKE_CASE : int = [na_probs[k] for k in qid_list]
_SCREAMING_SNAKE_CASE : List[str] = np.ones_like(__A ) / float(len(__A ) )
plt.hist(__A , weights=__A , bins=20 , range=(0.0, 1.0) )
plt.xlabel("""Model probability of no-answer""" )
plt.ylabel("""Proportion of dataset""" )
plt.title(f"""Histogram of no-answer probability: {name}""" )
plt.savefig(os.path.join(__A , f"""na_prob_hist_{name}.png""" ) )
plt.clf()
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[int] = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
_SCREAMING_SNAKE_CASE : str = num_no_ans
_SCREAMING_SNAKE_CASE : Optional[Any] = cur_score
_SCREAMING_SNAKE_CASE : Optional[Any] = 0.0
_SCREAMING_SNAKE_CASE : List[Any] = sorted(__A , key=lambda SCREAMING_SNAKE_CASE__ : na_probs[k] )
for i, qid in enumerate(__A ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
_SCREAMING_SNAKE_CASE : Dict = scores[qid]
else:
if preds[qid]:
_SCREAMING_SNAKE_CASE : Dict = -1
else:
_SCREAMING_SNAKE_CASE : str = 0
cur_score += diff
if cur_score > best_score:
_SCREAMING_SNAKE_CASE : Union[str, Any] = cur_score
_SCREAMING_SNAKE_CASE : List[Any] = na_probs[qid]
return 1_0_0.0 * best_score / len(__A ), best_thresh
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[int] = find_best_thresh(__A , __A , __A , __A )
_SCREAMING_SNAKE_CASE : str = find_best_thresh(__A , __A , __A , __A )
_SCREAMING_SNAKE_CASE : List[str] = best_exact
_SCREAMING_SNAKE_CASE : List[Any] = exact_thresh
_SCREAMING_SNAKE_CASE : Optional[Any] = best_fa
_SCREAMING_SNAKE_CASE : Optional[int] = fa_thresh
def snake_case_ ( ):
"""simple docstring"""
with open(OPTS.data_file ) as f:
_SCREAMING_SNAKE_CASE : Dict = json.load(__A )
_SCREAMING_SNAKE_CASE : Union[str, Any] = dataset_json["""data"""]
with open(OPTS.pred_file ) as f:
_SCREAMING_SNAKE_CASE : int = json.load(__A )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
_SCREAMING_SNAKE_CASE : Any = json.load(__A )
else:
_SCREAMING_SNAKE_CASE : Any = {k: 0.0 for k in preds}
_SCREAMING_SNAKE_CASE : Optional[int] = make_qid_to_has_ans(__A ) # maps qid to True/False
_SCREAMING_SNAKE_CASE : Dict = [k for k, v in qid_to_has_ans.items() if v]
_SCREAMING_SNAKE_CASE : Optional[int] = [k for k, v in qid_to_has_ans.items() if not v]
_SCREAMING_SNAKE_CASE : Optional[Any] = get_raw_scores(__A , __A )
_SCREAMING_SNAKE_CASE : Tuple = apply_no_ans_threshold(__A , __A , __A , OPTS.na_prob_thresh )
_SCREAMING_SNAKE_CASE : Optional[Any] = apply_no_ans_threshold(__A , __A , __A , OPTS.na_prob_thresh )
_SCREAMING_SNAKE_CASE : Optional[int] = make_eval_dict(__A , __A )
if has_ans_qids:
_SCREAMING_SNAKE_CASE : Any = make_eval_dict(__A , __A , qid_list=__A )
merge_eval(__A , __A , """HasAns""" )
if no_ans_qids:
_SCREAMING_SNAKE_CASE : str = make_eval_dict(__A , __A , qid_list=__A )
merge_eval(__A , __A , """NoAns""" )
if OPTS.na_prob_file:
find_all_best_thresh(__A , __A , __A , __A , __A , __A )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(__A , __A , __A , __A , __A , OPTS.out_image_dir )
histogram_na_prob(__A , __A , OPTS.out_image_dir , """hasAns""" )
histogram_na_prob(__A , __A , OPTS.out_image_dir , """noAns""" )
if OPTS.out_file:
with open(OPTS.out_file , """w""" ) as f:
json.dump(__A , __A )
else:
print(json.dumps(__A , indent=2 ) )
if __name__ == "__main__":
UpperCAmelCase_ : Union[str, Any] = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
main()
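
# Typical usage of the evaluation script above (file names are illustrative):
#   python evaluate_squad_v2.py dev-v2.0.json predictions.json -o eval.json -n na_probs.json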
| 533 |
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
__lowercase : Optional[int] = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['''memory_attention''', '''encoder_attn'''],
['''attention''', '''attn'''],
['''/''', '''.'''],
['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
['''.LayerNorm.beta''', '''_layer_norm.bias'''],
['''r.layer_''', '''r.layers.'''],
['''output_proj''', '''out_proj'''],
['''ffn.dense_1.''', '''fc2.'''],
['''ffn.dense.''', '''fc1.'''],
['''ffn_layer_norm''', '''final_layer_norm'''],
['''kernel''', '''weight'''],
['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
['''embeddings.weights''', '''shared.weight'''],
]
def lowercase ( __A : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
for pegasus_name, hf_name in PATTERNS:
snake_case : Dict = k.replace(__A , __A )
return k
def lowercase ( __A : dict , __A : dict ) -> PegasusForConditionalGeneration:
'''simple docstring'''
snake_case : Dict = DEFAULTS.copy()
cfg_kwargs.update(__A )
snake_case : int = PegasusConfig(**__A )
snake_case : List[Any] = PegasusForConditionalGeneration(__A )
snake_case : Optional[Any] = torch_model.model.state_dict()
snake_case : Optional[int] = {}
for k, v in tf_weights.items():
snake_case : str = rename_state_dict_key(__A )
if new_k not in sd:
raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" )
if "dense" in k or "proj" in new_k:
snake_case : Optional[Any] = v.T
snake_case : List[Any] = torch.tensor(__A , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, f"""{new_k}, {k}, {v.shape}, {sd[new_k].shape}"""
# make sure embedding.padding_idx is respected
snake_case : List[str] = torch.zeros_like(mapping["""shared.weight"""][cfg.pad_token_id + 1] )
snake_case : Optional[Any] = mapping["""shared.weight"""]
snake_case : Optional[Any] = mapping["""shared.weight"""]
snake_case : Tuple = {k: torch.zeros_like(__A ) for k, v in sd.items() if k.endswith("""bias""" ) and k not in mapping}
mapping.update(**__A )
snake_case , snake_case : Union[str, Any] = torch_model.model.load_state_dict(__A , strict=__A )
snake_case : Union[str, Any] = [
k for k in missing if k not in ["""encoder.embed_positions.weight""", """decoder.embed_positions.weight"""]
]
assert unexpected_missing == [], f"""no matches found for the following torch keys {unexpected_missing}"""
assert extra == [], f"""no matches found for the following tf keys {extra}"""
return torch_model
def lowercase ( __A : int="./ckpt/aeslc/model.ckpt-32000" ) -> Dict:
'''simple docstring'''
snake_case : Optional[Any] = tf.train.list_variables(__A )
snake_case : Union[str, Any] = {}
snake_case : List[str] = ["""Adafactor""", """global_step"""]
for name, shape in tqdm(__A , desc="""converting tf checkpoint to dict""" ):
snake_case : str = any(pat in name for pat in ignore_name )
if skip_key:
continue
snake_case : List[str] = tf.train.load_variable(__A , __A )
snake_case : Optional[Any] = array
return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str) -> None:
    '''simple docstring'''
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('''tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
    parser.add_argument('''save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join('''pegasus''', dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
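# --- Hedged usage sketch (added; not part of the original script; the script
# filename is assumed, the checkpoint path is the function's own default) ---
#
#   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 ./pegasus/aeslc
#
# or, if the module is importable:
#
#   convert_pegasus_ckpt_to_pytorch("./ckpt/aeslc/model.ckpt-32000", "./pegasus/aeslc")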
| 36 | 0 |
def solution(n: int = 2000000) -> int:
    """simple docstring"""
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
if __name__ == "__main__":
print(f'''{solution() = }''')
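# Quick sanity check (added; not in the original): for n=10 the sieve marks
# 4, 6, 8, 9, 10 as composite, so solution(10) sums the primes below 10:
# 2 + 3 + 5 + 7 == 17.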
| 242 |
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    '''simple docstring'''
    def __init__(self, model):
        '''simple docstring'''
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)
    # required by pytorch_lightning; unused during the conversion
    def forward(self):
        '''simple docstring'''
        pass
def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    '''simple docstring'''
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)
    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])
    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)
    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()
    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)
    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--longformer_model''',
default=None,
type=str,
required=True,
help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
)
parser.add_argument(
'''--longformer_question_answering_ckpt_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch Lightning Checkpoint.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
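# --- Hedged usage sketch (added; not part of the original script; the script and
# checkpoint file names are illustrative) ---
#
#   python convert_longformer_original_pytorch_lightning_to_pytorch.py \
#       --longformer_model longformer-base-4096 \
#       --longformer_question_answering_ckpt_path ./checkpoints/epoch=2.ckpt \
#       --pytorch_dump_folder_path ./longformer-base-4096-qa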
| 36 | 0 |
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    def __init__(self) -> None:
        '''simple docstring'''
        self.process = psutil.Process()
        self.peak_monitoring = False
    def peak_monitor(self) -> None:
        '''simple docstring'''
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break
    def start(self) -> None:
        '''simple docstring'''
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()
    def stop(self) -> int:
        '''simple docstring'''
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak
cpu_peak_tracker = PeakCPUMemory()
def start_measure():
    """simple docstring"""
    # Time
    measures = {"time": time.time()}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()
    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()
    return measures
def end_measure(start_measures):
    """simple docstring"""
    # Time
    measures = {"time": time.time() - start_measures["time"]}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem (in MiB)
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20
    # GPU mem (in MiB)
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20
    return measures
def log_measures(measures, description):
    """simple docstring"""
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
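# --- Hedged usage sketch (added; not part of the original module; assumes a CUDA
# device, as the helpers above do) ---
#
#   start = start_measure()
#   run_inference()                      # hypothetical workload
#   measures = end_measure(start)
#   log_measures(measures, "inference")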
| 563 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r'''\b(a|an|the)\b''', re.UNICODE)
OPTS = None
def parse_args():
    '''simple docstring'''
    parser = argparse.ArgumentParser("""Official evaluation script for SQuAD version 2.0.""" )
parser.add_argument("""data_file""" , metavar="""data.json""" , help="""Input data JSON file.""" )
parser.add_argument("""pred_file""" , metavar="""pred.json""" , help="""Model predictions.""" )
parser.add_argument(
"""--out-file""" , """-o""" , metavar="""eval.json""" , help="""Write accuracy metrics to file (default is stdout).""" )
parser.add_argument(
"""--na-prob-file""" , """-n""" , metavar="""na_prob.json""" , help="""Model estimates of probability of no answer.""" )
parser.add_argument(
"""--na-prob-thresh""" , """-t""" , type=__A , default=1.0 , help="""Predict \"\" if no-answer probability exceeds this (default = 1.0).""" , )
parser.add_argument(
"""--out-image-dir""" , """-p""" , metavar="""out_images""" , default=__A , help="""Save precision-recall curves to directory.""" )
parser.add_argument("""--verbose""" , """-v""" , action="""store_true""" )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
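# Hedged usage sketch (added; not part of the official script; file names are
# illustrative):
#
#   python evaluate_squad_v2.py dev-v2.0.json predictions.json \
#       --na-prob-file na_probs.json -o eval.json -p ./pr_curves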
def make_qid_to_has_ans(dataset):
    '''simple docstring'''
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans
def normalize_answer(s):
    '''simple docstring'''
    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)
    def white_space_fix(text):
        return " ".join(text.split())
    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)
    def lower(text):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s))))
def get_tokens(s):
    '''simple docstring'''
    if not s:
        return []
    return normalize_answer(s).split()
def compute_exact(a_gold, a_pred):
    '''simple docstring'''
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_f1(a_gold, a_pred):
    '''simple docstring'''
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
def get_raw_scores(dataset, preds):
    '''simple docstring'''
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    '''simple docstring'''
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores
def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    '''simple docstring'''
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ] )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ] )
def merge_eval(main_eval, new_eval, prefix):
    '''simple docstring'''
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
    '''simple docstring'''
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    '''simple docstring'''
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    '''simple docstring'''
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_exact.png"), title="Precision-Recall curve for Exact Match score", )
    pr_f1 = make_precision_recall_eval(
        f1_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_f1.png"), title="Precision-Recall curve for F1 score", )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_oracle.png"), title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)", )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")
def histogram_na_prob(na_probs, qid_list, image_dir, name):
    '''simple docstring'''
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    '''simple docstring'''
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh
def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    '''simple docstring'''
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh
def main():
    '''simple docstring'''
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))
if __name__ == "__main__":
    OPTS = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('''Agg''')
import matplotlib.pyplot as plt
main()
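# Hedged worked examples (added; not part of the official script) of the scoring
# helpers above, values checked by hand against the definitions:
#
#   normalize_answer("The Cat!")                 -> "cat"
#   compute_exact("The Cat!", "a cat")           -> 1
#   compute_f1("big black cat", "black cat")     -> 0.8   (precision 1.0, recall 2/3)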
| 36 | 0 |
'''simple docstring'''
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])
        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ],
        )
        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        # a batch of five identical predictions (written as a repeated literal)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )
    @require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf"
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )
        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )
    @slow
    @require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification",
            model="openai/clip-vit-base-patch32",
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )
        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
    @slow
    @require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf"
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )
        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
| 44 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class MobileViTImageProcessor(BaseImageProcessor):
    '''simple docstring'''
    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_flip_channel_order: bool = True,
        **kwargs,
    ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PIL.Image.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        '''simple docstring'''
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        '''simple docstring'''
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        '''simple docstring'''
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def flip_channel_order(
        self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None
    ) -> np.ndarray:
        '''simple docstring'''
        return flip_channel_order(image, data_format=data_format)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: Optional[PILImageResampling] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_flip_channel_order: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        '''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes=None):
        '''simple docstring'''
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits" )
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
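# --- Hedged usage sketch (added; not part of this module; the image file name is
# illustrative). With the default settings (resize to shortest_edge=224, then
# center crop to 256x256), each image becomes a 3x256x256 tensor:
#
#   processor = MobileViTImageProcessor()
#   inputs = processor(images=PIL.Image.open("cat.png"), return_tensors="pt")
#   inputs["pixel_values"].shape   # torch.Size([1, 3, 256, 256])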
| 36 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longt5"] = [
        "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongT5EncoderModel",
        "LongT5ForConditionalGeneration",
        "LongT5Model",
        "LongT5PreTrainedModel",
    ]
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_longt5"] = [
        "FlaxLongT5ForConditionalGeneration",
        "FlaxLongT5Model",
        "FlaxLongT5PreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
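# Hedged usage note (added; not part of this module): with the lazy module in
# place, importing the package is cheap; the heavy torch/flax modules are only
# imported when one of their attributes is first accessed, e.g.
#
#   from transformers.models.longt5 import LongT5Config   # config only, no torch
#   config = LongT5Config()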
| 620 |
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer
def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    '''simple docstring'''
    config = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(config)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
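# Hedged usage sketch (added; not part of the original script; the script filename
# is illustrative). fire maps positional args and --flags onto the function's
# parameters, and extra flags become config overrides via **config_kwargs:
#
#   python save_randomly_initialized_model.py t5-small ./t5-small-random --d_model 64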
| 36 | 0 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset('''glue''' , '''mrpc''' )
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=False )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''' , '''labels''' )
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
        return tokenizer.pad(examples , padding='''longest''' , return_tensors='''pt''' )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
    return train_dataloader, eval_dataloader
def evaluation_loop(accelerator, model, eval_dataloader, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device )
        with torch.no_grad():
            outputs = model(**batch )
        predictions = outputs.logits.argmax(dim=-1 )
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch['''labels''']) )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader ) - 1:
                predictions = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                references = references[: len(eval_dataloader.dataset ) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions , references=references , )
    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def training_function(config, args):
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["""lr"""]
    num_epochs = int(config['''num_epochs'''] )
    seed = int(config['''seed'''] )
    batch_size = int(config['''batch_size'''] )
    model_name = args.model_name_or_path
    set_seed(seed )
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size , model_name )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name , return_dict=True )
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters() , lr=lr )
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            """gradient_accumulation_steps"""
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader ) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=0 , num_training_steps=max_training_steps , )
    else:
        lr_scheduler = DummyScheduler(optimizer , total_num_steps=max_training_steps , warmup_num_steps=0 )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load('''glue''' , '''mrpc''' )
    ending_epoch = num_epochs
    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch
    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint )
        epoch_string = args.resume_from_checkpoint.split('''epoch_''' )[1]
        state_epoch_num = """"""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num ) + 1
        accuracy = evaluation_loop(accelerator , model , eval_dataloader , metric )
        accelerator.print('''resumed checkpoint performance:''' , accuracy )
        accelerator.print('''resumed checkpoint\'s scheduler\'s lr:''' , lr_scheduler.get_lr()[0] )
        accelerator.print('''resumed optimizers\'s lr:''' , optimizer.param_groups[0]['''lr'''] )
        with open(os.path.join(args.output_dir , f'''state_{starting_epoch-1}.json''' ) , '''r''' ) as f:
            resumed_state = json.load(f )
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return
    # Now we train the model
    state = {}
    for epoch in range(starting_epoch , ending_epoch ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            outputs = model(**batch )
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        output_dir = f'''epoch_{epoch}'''
        output_dir = os.path.join(args.output_dir , output_dir )
        accelerator.save_state(output_dir )
        accuracy = evaluation_loop(accelerator , model , eval_dataloader , metric )
        state["""accuracy"""] = accuracy
        state["""lr"""] = lr_scheduler.get_lr()[0]
        state["""optimizer_lr"""] = optimizer.param_groups[0]["""lr"""]
        state["""epoch"""] = epoch
        state["""overall_step"""] = overall_step
        accelerator.print(f'''epoch {epoch}:''' , state )
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir , f'''state_{epoch}.json''' ) , '''w''' ) as f:
                json.dump(state , f )
def main():
    parser = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' )
    parser.add_argument(
        '''--model_name_or_path''' , type=str , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=False , )
    parser.add_argument(
        '''--output_dir''' , type=str , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
    parser.add_argument(
        '''--resume_from_checkpoint''' , type=str , default=None , help='''If the training should continue from a checkpoint folder.''' , )
    parser.add_argument(
        '''--partial_train_epoch''' , type=int , default=None , help='''If passed, the training will stop after this number of epochs.''' , )
    parser.add_argument(
        '''--num_epochs''' , type=int , default=2 , help='''Number of train epochs.''' , )
    args = parser.parse_args()
    config = {"""lr""": 2e-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
    training_function(config , args )
if __name__ == "__main__":
main()
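# --- Hedged usage sketch (added; not part of the original script; the script
# filename is illustrative) ---
# The first run trains and writes `epoch_0`, `epoch_1`, ... plus `state_<epoch>.json`;
# the second run resumes from a saved folder and verifies the restored state:
#
#   accelerate launch checkpointing_script.py --output_dir ./ckpts --num_epochs 2
#   accelerate launch checkpointing_script.py --output_dir ./ckpts \
#       --resume_from_checkpoint ./ckpts/epoch_0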
| 590 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''',
    '''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''',
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config(PretrainedConfig):
    '''simple docstring'''
    model_type = '''mobilenet_v1'''
    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(**kwargs)
        if depth_multiplier <= 0:
            raise ValueError("""depth_multiplier must be greater than zero.""" )
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class MobileNetV1OnnxConfig(OnnxConfig):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse('''1.11''' )
    @property
    def inputs(self):
        '''simple docstring'''
        return OrderedDict([("""pixel_values""", {0: """batch"""})] )
    @property
    def outputs(self):
        '''simple docstring'''
        if self.task == "image-classification":
            return OrderedDict([("""logits""", {0: """batch"""})] )
        else:
            return OrderedDict([("""last_hidden_state""", {0: """batch"""}), ("""pooler_output""", {0: """batch"""})] )
    @property
    def atol_for_validation(self):
        '''simple docstring'''
        return 1E-4
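# Hedged usage sketch (added; not part of this module):
#
#   config = MobileNetV1Config(depth_multiplier=0.75)
#   onnx_config = MobileNetV1OnnxConfig(config)
#   list(onnx_config.inputs)          # ["pixel_values"]
#   onnx_config.atol_for_validation   # 1e-4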
| 36 | 0 |