| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string, 82-54.1k chars | int64, 0-699 | string, 111-35.6k chars | int64, 0-699 | int64, 0-1 |
"""simple docstring"""
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class A_ ( unittest.TestCase ):
def __init__( self: Optional[int] ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: List[Any]=7 ,__lowerCAmelCase: List[str]=3 ,__lowerCAmelCase: List[str]=18 ,__lowerCAmelCase: List[str]=30 ,__lowerCAmelCase: List[str]=400 ,__lowerCAmelCase: List[Any]=True ,__lowerCAmelCase: List[Any]=None ,__lowerCAmelCase: str=True ,):
'''simple docstring'''
_lowerCamelCase : Dict = size if size is not None else {"height": 18, "width": 18}
_lowerCamelCase : List[Any] = parent
_lowerCamelCase : str = batch_size
_lowerCamelCase : Optional[int] = num_channels
_lowerCamelCase : Optional[Any] = image_size
_lowerCamelCase : Optional[int] = min_resolution
_lowerCamelCase : List[str] = max_resolution
_lowerCamelCase : Optional[Any] = do_resize
_lowerCamelCase : Dict = size
_lowerCamelCase : int = do_normalize
def _lowercase ( self: str ):
'''simple docstring'''
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.88_66_44_36_34_03_32_03, 0.66_18_82_93_69_54_49_83, 0.38_91_74_64_01_78_68_04],
[-0.60_42_55_91_46_88_11_04, -0.0_22_95_00_88_60_52_84_69, 0.54_23_79_73_69_00_32_96],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class A_ ( _a , unittest.TestCase ):
lowerCAmelCase__ = ImageGPTImageProcessor if is_vision_available() else None
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : List[str] = ImageGPTImageProcessingTester(self )
@property
def _lowercase ( self: Tuple ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCAmelCase ,"clusters" ) )
self.assertTrue(hasattr(__lowerCAmelCase ,"do_resize" ) )
self.assertTrue(hasattr(__lowerCAmelCase ,"size" ) )
self.assertTrue(hasattr(__lowerCAmelCase ,"do_normalize" ) )
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : Tuple = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{"height": 18, "width": 18} )
_lowerCamelCase : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 )
self.assertEqual(image_processor.size ,{"height": 42, "width": 42} )
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
_lowerCamelCase : List[Any] = json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(__lowerCAmelCase ,obj[key] ) )
else:
self.assertEqual(obj[key] ,__lowerCAmelCase )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase : Tuple = os.path.join(__lowerCAmelCase ,"image_processor.json" )
image_processor_first.to_json_file(__lowerCAmelCase )
_lowerCamelCase : List[str] = self.image_processing_class.from_json_file(__lowerCAmelCase ).to_dict()
_lowerCamelCase : str = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(__lowerCAmelCase ,image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] ,__lowerCAmelCase )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = self.image_processing_class.from_pretrained(__lowerCAmelCase ).to_dict()
_lowerCamelCase : Dict = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(__lowerCAmelCase ,image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] ,__lowerCAmelCase )
@unittest.skip("ImageGPT requires clusters at initialization" )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
pass
def lowerCamelCase_( ) -> Union[str, Any]:
'''simple docstring'''
_lowerCamelCase : int = load_dataset("hf-internal-testing/fixtures_image_utils" , split="test" )
_lowerCamelCase : Any = Image.open(dataset[4]["file"] )
_lowerCamelCase : Any = Image.open(dataset[5]["file"] )
_lowerCamelCase : str = [imagea, imagea]
return images
@require_vision
@require_torch
class A_ ( unittest.TestCase ):
@slow
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small" )
_lowerCamelCase : Union[str, Any] = prepare_images()
# test non-batched
_lowerCamelCase : Any = image_processing(images[0] ,return_tensors="pt" )
self.assertIsInstance(encoding.input_ids ,torch.LongTensor )
self.assertEqual(encoding.input_ids.shape ,(1, 1_024) )
_lowerCamelCase : Optional[int] = [306, 191, 191]
self.assertEqual(encoding.input_ids[0, :3].tolist() ,__lowerCAmelCase )
# test batched
_lowerCamelCase : Optional[Any] = image_processing(__lowerCAmelCase ,return_tensors="pt" )
self.assertIsInstance(encoding.input_ids ,torch.LongTensor )
self.assertEqual(encoding.input_ids.shape ,(2, 1_024) )
_lowerCamelCase : List[str] = [303, 13, 13]
self.assertEqual(encoding.input_ids[1, -3:].tolist() ,__lowerCAmelCase )
| code_codestyle: 46 |
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase = "cpu" , _lowerCamelCase = None ) -> None:
'''simple docstring'''
_lowerCamelCase : Any = torch.load(_lowerCamelCase , map_location=_lowerCamelCase )
for k, v in tqdm(state_dict.items() ):
if not isinstance(_lowerCamelCase , torch.Tensor ):
raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin" )
_lowerCamelCase : List[str] = v.half()
if save_path is None: # overwrite src_path
_lowerCamelCase : Union[str, Any] = src_path
torch.save(_lowerCamelCase , _lowerCamelCase )
if __name__ == "__main__":
fire.Fire(convert)
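# Usage sketch (illustrative file names; fire maps the function signature to CLI
# arguments, so src_path is positional and save_path is a flag):
#
#     python convert_to_fp16.py pytorch_model.bin --save_path pytorch_model_fp16.bin
#
# Omitting --save_path halves the checkpoint in place.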
| style_context_codestyle: 46 | label: 1 |
"""simple docstring"""
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase = False ) -> str:
'''simple docstring'''
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
_lowerCamelCase : str = F"""Expected string as input, found {type(_lowerCamelCase )}"""
raise ValueError(_lowerCamelCase )
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
_lowerCamelCase : List[Any] = F"""Expected boolean as use_pascal parameter, found {type(_lowerCamelCase )}"""
raise ValueError(_lowerCamelCase )
_lowerCamelCase : str = input_str.split("_" )
_lowerCamelCase : str = 0 if use_pascal else 1
_lowerCamelCase : List[Any] = words[start_index:]
_lowerCamelCase : Tuple = [word[0].upper() + word[1:] for word in words_to_capitalize]
_lowerCamelCase : int = "" if use_pascal else words[0]
return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
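# Example calls (function name as reconstructed above):
#
#     snake_to_camel_case("some_random_string")                   # -> "someRandomString"
#     snake_to_camel_case("some_random_string", use_pascal=True)  # -> "SomeRandomString"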
| code_codestyle: 46 |
"""simple docstring"""
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
_lowerCAmelCase : List[str] = get_tests_dir('''fixtures/dummy-config.json''')
class A_ ( unittest.TestCase ):
def _lowercase ( self: int ):
'''simple docstring'''
_lowerCamelCase : List[Any] = 0
def _lowercase ( self: Dict ):
'''simple docstring'''
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto" ) )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : Dict = AutoConfig.from_pretrained("bert-base-uncased" )
self.assertIsInstance(__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = AutoConfig.from_pretrained(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : List[str] = AutoConfig.from_pretrained(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = AutoConfig.for_model("roberta" )
self.assertIsInstance(__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
_lowerCamelCase : List[Any] = os.path.join(__lowerCAmelCase ,"fake-roberta" )
os.makedirs(__lowerCAmelCase ,exist_ok=__lowerCAmelCase )
with open(os.path.join(__lowerCAmelCase ,"config.json" ) ,"w" ) as f:
f.write(json.dumps({} ) )
_lowerCamelCase : List[Any] = AutoConfig.from_pretrained(__lowerCAmelCase )
self.assertEqual(type(__lowerCAmelCase ) ,__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
try:
AutoConfig.register("custom" ,__lowerCAmelCase )
# Wrong model type will raise an error
with self.assertRaises(__lowerCAmelCase ):
AutoConfig.register("model" ,__lowerCAmelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__lowerCAmelCase ):
AutoConfig.register("bert" ,__lowerCAmelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
_lowerCamelCase : Any = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__lowerCAmelCase )
_lowerCamelCase : List[str] = AutoConfig.from_pretrained(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase ,__lowerCAmelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def _lowercase ( self: Dict ):
'''simple docstring'''
with self.assertRaisesRegex(
__lowerCAmelCase ,"bert-base is not a local folder and is not a valid model identifier" ):
_lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained("bert-base" )
def _lowercase ( self: Dict ):
'''simple docstring'''
with self.assertRaisesRegex(
__lowerCAmelCase ,r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
_lowerCamelCase : str = AutoConfig.from_pretrained(__lowerCAmelCase ,revision="aaaaaa" )
def _lowercase ( self: Tuple ):
'''simple docstring'''
with self.assertRaisesRegex(
__lowerCAmelCase ,"hf-internal-testing/no-config-test-repo does not appear to have a file named config.json." ,):
_lowerCamelCase : List[str] = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo" )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
with self.assertRaises(__lowerCAmelCase ):
_lowerCamelCase : List[Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__lowerCAmelCase ):
_lowerCamelCase : Any = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" ,trust_remote_code=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" ,trust_remote_code=__lowerCAmelCase )
self.assertEqual(config.__class__.__name__ ,"NewModelConfig" )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__lowerCAmelCase )
_lowerCamelCase : str = AutoConfig.from_pretrained(__lowerCAmelCase ,trust_remote_code=__lowerCAmelCase )
self.assertEqual(reloaded_config.__class__.__name__ ,"NewModelConfig" )
def _lowercase ( self: Dict ):
'''simple docstring'''
class A_ ( _a ):
lowerCAmelCase__ = 'new-model'
try:
AutoConfig.register("new-model" ,__lowerCAmelCase )
# If remote code is not set, the default is to use local
_lowerCamelCase : List[Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" )
self.assertEqual(config.__class__.__name__ ,"NewModelConfigLocal" )
# If remote code is disabled, we load the local one.
_lowerCamelCase : List[Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" ,trust_remote_code=__lowerCAmelCase )
self.assertEqual(config.__class__.__name__ ,"NewModelConfigLocal" )
# If remote is enabled, we load from the Hub
_lowerCamelCase : List[Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" ,trust_remote_code=__lowerCAmelCase )
self.assertEqual(config.__class__.__name__ ,"NewModelConfig" )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
| style_context_codestyle: 46 | label: 1 |
"""simple docstring"""
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[str]:
'''simple docstring'''
_lowerCamelCase : int = LxmertConfig.from_json_file(_lowerCamelCase )
print(F"""Building PyTorch model from configuration: {config}""" )
_lowerCamelCase : int = LxmertForPreTraining(_lowerCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , _lowerCamelCase )
if __name__ == "__main__":
_lowerCAmelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_lowerCAmelCase : List[Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| code_codestyle: 46 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_lowerCAmelCase : str = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Optional[Any] = ['''GPTSw3Tokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_swa import GPTSwaTokenizer
else:
import sys
_lowerCAmelCase : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| style_context_codestyle: 46 | label: 1 |
"""simple docstring"""
import re
def lowerCamelCase_( _lowerCamelCase ) -> str:
'''simple docstring'''
if len(re.findall("[ATCG]" , _lowerCamelCase ) ) != len(_lowerCamelCase ):
raise ValueError("Invalid Strand" )
return dna.translate(dna.maketrans("ATCG" , "TAGC" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
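# Worked example (complement mapping A<->T, C<->G, as implemented above):
#
#     dna("GCTA")  # -> "CGAT"
#     dna("GCTB")  # -> raises ValueError("Invalid Strand"), "B" is not a valid base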
| code_codestyle: 46 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""transformer.blocks.{i}.norm1.weight""", F"""vilt.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm1.bias""", F"""vilt.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.weight""", F"""vilt.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.bias""", F"""vilt.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.weight""", F"""vilt.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.bias""", F"""vilt.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.mlp.fc1.weight""", F"""vilt.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc1.bias""", F"""vilt.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.weight""", F"""vilt.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.bias""", F"""vilt.encoder.layer.{i}.output.dense.bias""") )
# embeddings
rename_keys.extend(
[
# text embeddings
("text_embeddings.word_embeddings.weight", "vilt.embeddings.text_embeddings.word_embeddings.weight"),
(
"text_embeddings.position_embeddings.weight",
"vilt.embeddings.text_embeddings.position_embeddings.weight",
),
("text_embeddings.position_ids", "vilt.embeddings.text_embeddings.position_ids"),
(
"text_embeddings.token_type_embeddings.weight",
"vilt.embeddings.text_embeddings.token_type_embeddings.weight",
),
("text_embeddings.LayerNorm.weight", "vilt.embeddings.text_embeddings.LayerNorm.weight"),
("text_embeddings.LayerNorm.bias", "vilt.embeddings.text_embeddings.LayerNorm.bias"),
# patch embeddings
("transformer.cls_token", "vilt.embeddings.cls_token"),
("transformer.patch_embed.proj.weight", "vilt.embeddings.patch_embeddings.projection.weight"),
("transformer.patch_embed.proj.bias", "vilt.embeddings.patch_embeddings.projection.bias"),
("transformer.pos_embed", "vilt.embeddings.position_embeddings"),
# token type embeddings
("token_type_embeddings.weight", "vilt.embeddings.token_type_embeddings.weight"),
] )
# final layernorm + pooler
rename_keys.extend(
[
("transformer.norm.weight", "vilt.layernorm.weight"),
("transformer.norm.bias", "vilt.layernorm.bias"),
("pooler.dense.weight", "vilt.pooler.dense.weight"),
("pooler.dense.bias", "vilt.pooler.dense.bias"),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
("vqa_classifier.0.weight", "classifier.0.weight"),
("vqa_classifier.0.bias", "classifier.0.bias"),
("vqa_classifier.1.weight", "classifier.1.weight"),
("vqa_classifier.1.bias", "classifier.1.bias"),
("vqa_classifier.3.weight", "classifier.3.weight"),
("vqa_classifier.3.bias", "classifier.3.bias"),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
("nlvr2_classifier.0.weight", "classifier.0.weight"),
("nlvr2_classifier.0.bias", "classifier.0.bias"),
("nlvr2_classifier.1.weight", "classifier.1.weight"),
("nlvr2_classifier.1.bias", "classifier.1.bias"),
("nlvr2_classifier.3.weight", "classifier.3.weight"),
("nlvr2_classifier.3.bias", "classifier.3.bias"),
] )
else:
pass
return rename_keys
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original model's weights to our ViLT structure.
    """
    config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = "huggingface/label-files"
        filename = "vqa2-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config)
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: "False", 1: "True"}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config)
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config)
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config)
    else:
        raise ValueError("Unknown model type")

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)
    if mlm_model or irtr_model:
        ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
        for k in ignore_keys:
            state_dict.pop(k, None)

    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict)

    # Define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    processor = ViltProcessor(image_processor, tokenizer)

    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_1 = processor(image1, text, return_tensors="pt")
        encoding_2 = processor(image2, text, return_tensors="pt")
        outputs = model(
            input_ids=encoding_1.input_ids,
            pixel_values=encoding_1.pixel_values,
            pixel_values_2=encoding_2.pixel_values,
        )
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image, text, return_tensors="pt")
        outputs = model(**encoding)

    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)

        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape

        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model and processor to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_lowerCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
_lowerCAmelCase : Union[str, Any] = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| style_context_codestyle: 46 | label: 1 |
"""simple docstring"""
from __future__ import annotations
def lowerCamelCase_( _lowerCamelCase ) -> int:
'''simple docstring'''
for i in range(1 , len(matrix[0] ) ):
matrix[0][i] += matrix[0][i - 1]
# preprocessing the first column
for i in range(1 , len(_lowerCamelCase ) ):
matrix[i][0] += matrix[i - 1][0]
# updating the path cost for current position
for i in range(1 , len(_lowerCamelCase ) ):
for j in range(1 , len(matrix[0] ) ):
matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] )
return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
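# Worked example (classic minimum path sum, function name as reconstructed above):
#
#     minimum_cost_path([[1, 3, 1],
#                        [1, 5, 1],
#                        [4, 2, 1]])  # -> 7, via the path 1 -> 3 -> 1 -> 1 -> 1
#
# Note that the function mutates its input matrix in place.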
| code_codestyle: 46 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> str | Literal[False]:
'''simple docstring'''
_lowerCamelCase : Optional[Any] = list(_lowerCamelCase )
_lowerCamelCase : Any = list(_lowerCamelCase )
_lowerCamelCase : Dict = 0
for i in range(len(_lowerCamelCase ) ):
if lista[i] != lista[i]:
count += 1
_lowerCamelCase : List[str] = "_"
if count > 1:
return False
else:
return "".join(_lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase ) -> list[str]:
'''simple docstring'''
_lowerCamelCase : List[str] = []
while True:
_lowerCamelCase : Tuple = ["$"] * len(_lowerCamelCase )
_lowerCamelCase : str = []
for i in range(len(_lowerCamelCase ) ):
for j in range(i + 1 , len(_lowerCamelCase ) ):
_lowerCamelCase : Dict = compare_string(binary[i] , binary[j] )
if k is False:
_lowerCamelCase : Any = "*"
_lowerCamelCase : Optional[int] = "*"
temp.append("X" )
for i in range(len(_lowerCamelCase ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(_lowerCamelCase ) == 0:
return pi
_lowerCamelCase : List[Any] = list(set(_lowerCamelCase ) )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> list[str]:
'''simple docstring'''
_lowerCamelCase : Optional[int] = []
for minterm in minterms:
_lowerCamelCase : List[Any] = ""
for _ in range(_lowerCamelCase ):
_lowerCamelCase : List[str] = str(minterm % 2 ) + string
minterm //= 2
temp.append(_lowerCamelCase )
return temp
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> bool:
'''simple docstring'''
_lowerCamelCase : Optional[Any] = list(_lowerCamelCase )
_lowerCamelCase : Optional[int] = list(_lowerCamelCase )
_lowerCamelCase : Dict = 0
for i in range(len(_lowerCamelCase ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> list[str]:
'''simple docstring'''
_lowerCamelCase : Dict = []
_lowerCamelCase : Dict = [0] * len(_lowerCamelCase )
for i in range(len(chart[0] ) ):
_lowerCamelCase : List[str] = 0
_lowerCamelCase : Optional[int] = -1
for j in range(len(_lowerCamelCase ) ):
if chart[j][i] == 1:
count += 1
_lowerCamelCase : Any = j
if count == 1:
_lowerCamelCase : Union[str, Any] = 1
for i in range(len(_lowerCamelCase ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(_lowerCamelCase ) ):
_lowerCamelCase : Optional[int] = 0
temp.append(prime_implicants[i] )
while True:
_lowerCamelCase : str = 0
_lowerCamelCase : int = -1
_lowerCamelCase : Dict = 0
for i in range(len(_lowerCamelCase ) ):
_lowerCamelCase : Optional[int] = chart[i].count(1 )
if count_n > max_n:
_lowerCamelCase : Any = count_n
_lowerCamelCase : Union[str, Any] = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(_lowerCamelCase ) ):
_lowerCamelCase : Any = 0
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> list[list[int]]:
'''simple docstring'''
_lowerCamelCase : str = [[0 for x in range(len(_lowerCamelCase ) )] for x in range(len(_lowerCamelCase ) )]
for i in range(len(_lowerCamelCase ) ):
_lowerCamelCase : List[Any] = prime_implicants[i].count("_" )
for j in range(len(_lowerCamelCase ) ):
if is_for_table(prime_implicants[i] , binary[j] , _lowerCamelCase ):
_lowerCamelCase : Optional[Any] = 1
return chart
def lowerCamelCase_( ) -> None:
'''simple docstring'''
_lowerCamelCase : Optional[int] = int(input("Enter the no. of variables\n" ) )
_lowerCamelCase : str = [
float(_lowerCamelCase )
for x in input(
"Enter the decimal representation of Minterms 'Spaces Separated'\n" ).split()
]
_lowerCamelCase : Tuple = decimal_to_binary(_lowerCamelCase , _lowerCamelCase )
_lowerCamelCase : str = check(_lowerCamelCase )
print("Prime Implicants are:" )
print(_lowerCamelCase )
_lowerCamelCase : Any = prime_implicant_chart(_lowerCamelCase , _lowerCamelCase )
_lowerCamelCase : List[Any] = selection(_lowerCamelCase , _lowerCamelCase )
print("Essential Prime Implicants are:" )
print(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| style_context_codestyle: 46 | label: 1 |
"""simple docstring"""
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> float:
'''simple docstring'''
if density <= 0:
raise ValueError("Impossible fluid density" )
if bulk_modulus <= 0:
raise ValueError("Impossible bulk modulus" )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
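# Worked example (rough textbook values for water near 20 °C, used here only as
# an illustration: bulk modulus ~2.15e9 Pa, density ~998 kg/m^3):
#
#     speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9)
#     # -> (2.15e9 / 998) ** 0.5, roughly 1468 m/s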
| code_codestyle: 46 |
"""simple docstring"""
from __future__ import annotations
from random import random
class A_ :
def __init__( self: List[str] ,__lowerCAmelCase: int | None = None ):
'''simple docstring'''
_lowerCamelCase : Any = value
_lowerCamelCase : Optional[int] = random()
_lowerCamelCase : Node | None = None
_lowerCamelCase : Node | None = None
def __repr__( self: Tuple ):
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return F"""'{self.value}: {self.prior:.5}'"""
else:
return pformat(
{F"""{self.value}: {self.prior:.5}""": (self.left, self.right)} ,indent=1 )
def __str__( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : Tuple = str(self.value ) + " "
_lowerCamelCase : Optional[Any] = str(self.left or "" )
_lowerCamelCase : int = str(self.right or "" )
return value + left + right
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> tuple[Node | None, Node | None]:
'''simple docstring'''
if root is None: # None tree is split into 2 Nones
return None, None
elif root.value is None:
return None, None
else:
if value < root.value:
_lowerCamelCase, _lowerCamelCase : int = split(root.left , _lowerCamelCase )
return left, root
else:
_lowerCamelCase, _lowerCamelCase : Optional[int] = split(root.right , _lowerCamelCase )
return root, right
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Node | None:
'''simple docstring'''
if (not left) or (not right): # If one node is None, return the other
return left or right
elif left.prior < right.prior:
_lowerCamelCase : Any = merge(left.right , _lowerCamelCase )
return left
else:
_lowerCamelCase : Optional[Any] = merge(_lowerCamelCase , right.left )
return right
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Node | None:
'''simple docstring'''
_lowerCamelCase : int = Node(_lowerCamelCase )
_lowerCamelCase, _lowerCamelCase : Tuple = split(_lowerCamelCase , _lowerCamelCase )
return merge(merge(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Node | None:
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : List[Any] = split(_lowerCamelCase , value - 1 )
_lowerCamelCase, _lowerCamelCase : List[Any] = split(_lowerCamelCase , _lowerCamelCase )
return merge(_lowerCamelCase , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase ) -> None:
'''simple docstring'''
if not root: # None
return
else:
inorder(root.left )
print(root.value , end="," )
inorder(root.right )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Node | None:
'''simple docstring'''
for arg in args.split():
if arg[0] == "+":
_lowerCamelCase : Optional[Any] = insert(_lowerCamelCase , int(arg[1:] ) )
elif arg[0] == "-":
_lowerCamelCase : Optional[Any] = erase(_lowerCamelCase , int(arg[1:] ) )
else:
print("Unknown command" )
return root
def lowerCamelCase_( ) -> None:
'''simple docstring'''
_lowerCamelCase : List[Any] = None
print(
"enter numbers to create a tree, + value to add value into treap, "
"- value to erase all nodes with value. 'q' to quit. " )
_lowerCamelCase : int = input()
while args != "q":
_lowerCamelCase : List[str] = interact_treap(_lowerCamelCase , _lowerCamelCase )
print(_lowerCamelCase )
_lowerCamelCase : Tuple = input()
print("good by!" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
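# Programmatic usage sketch (bypassing the interactive main above):
#
#     root = None
#     for value in (5, 3, 8):
#         root = insert(root, value)
#     inorder(root)  # prints: 3,5,8,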
| style_context_codestyle: 46 | label: 1 |
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
_lowerCAmelCase : str = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
_lowerCAmelCase : list[int] = [ord(letter) for letter in string.ascii_lowercase]
_lowerCAmelCase : set[int] = {ord(char) for char in VALID_CHARS}
_lowerCAmelCase : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> str | None:
'''simple docstring'''
_lowerCamelCase : str = ""
_lowerCamelCase : int
_lowerCamelCase : int
_lowerCamelCase : int
for keychar, cipherchar in zip(cycle(_lowerCamelCase ) , _lowerCamelCase ):
_lowerCamelCase : Optional[Any] = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(_lowerCamelCase )
return decoded
def lowerCamelCase_( _lowerCamelCase ) -> list[str]:
'''simple docstring'''
_lowerCamelCase : list[str] = []
for key in product(_lowerCamelCase , repeat=3 ):
_lowerCamelCase : int = try_key(_lowerCamelCase , _lowerCamelCase )
if encoded is not None:
possibles.append(_lowerCamelCase )
return possibles
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> list[str]:
'''simple docstring'''
return [possible for possible in possibles if common_word in possible.lower()]
def lowerCamelCase_( _lowerCamelCase = "p059_cipher.txt" ) -> int:
'''simple docstring'''
_lowerCamelCase : list[int]
_lowerCamelCase : list[str]
_lowerCamelCase : str
_lowerCamelCase : str
_lowerCamelCase : str = Path(_lowerCamelCase ).parent.joinpath(_lowerCamelCase ).read_text(encoding="utf-8" )
_lowerCamelCase : Optional[int] = [int(_lowerCamelCase ) for number in data.strip().split("," )]
_lowerCamelCase : List[Any] = filter_valid_chars(_lowerCamelCase )
for common_word in COMMON_WORDS:
_lowerCamelCase : Union[str, Any] = filter_common_word(_lowerCamelCase , _lowerCamelCase )
if len(_lowerCamelCase ) == 1:
break
_lowerCamelCase : List[str] = possibles[0]
return sum(ord(_lowerCamelCase ) for char in decoded_text )
if __name__ == "__main__":
print(f'''{solution() = }''')
| code_codestyle: 46 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCAmelCase : Dict = get_tests_dir('''fixtures/test_sentencepiece_bpe_char.model''')
@require_sentencepiece
@require_tokenizers
class A_ ( _a , unittest.TestCase ):
lowerCAmelCase__ = SpeechTaTokenizer
lowerCAmelCase__ = False
lowerCAmelCase__ = True
def _lowercase ( self: List[Any] ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
_lowerCamelCase : str = SpeechTaTokenizer(__lowerCAmelCase )
_lowerCamelCase : Tuple = AddedToken("<mask>" ,lstrip=__lowerCAmelCase ,rstrip=__lowerCAmelCase )
_lowerCamelCase : Optional[int] = mask_token
tokenizer.add_special_tokens({"mask_token": mask_token} )
tokenizer.add_tokens(["<ctc_blank>"] )
tokenizer.save_pretrained(self.tmpdirname )
def _lowercase ( self: List[str] ,__lowerCAmelCase: str ):
'''simple docstring'''
_lowerCamelCase : Dict = "this is a test"
_lowerCamelCase : Optional[Any] = "this is a test"
return input_text, output_text
def _lowercase ( self: List[str] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: Any=False ,__lowerCAmelCase: str=20 ,__lowerCAmelCase: List[Any]=5 ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : List[str] = self.get_input_output_texts(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = tokenizer.encode(__lowerCAmelCase ,add_special_tokens=__lowerCAmelCase )
_lowerCamelCase : Tuple = tokenizer.decode(__lowerCAmelCase ,clean_up_tokenization_spaces=__lowerCAmelCase )
return text, ids
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = "<pad>"
_lowerCamelCase : List[str] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCAmelCase ) ,__lowerCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCAmelCase ) ,__lowerCAmelCase )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,"<s>" )
self.assertEqual(vocab_keys[1] ,"<pad>" )
self.assertEqual(vocab_keys[-4] ,"œ" )
self.assertEqual(vocab_keys[-2] ,"<mask>" )
self.assertEqual(vocab_keys[-1] ,"<ctc_blank>" )
self.assertEqual(len(__lowerCAmelCase ) ,81 )
def _lowercase ( self: Dict ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size ,79 )
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = self.get_tokenizers(do_lower_case=__lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_lowerCamelCase : Tuple = tokenizer.vocab_size
_lowerCamelCase : Optional[Any] = len(__lowerCAmelCase )
self.assertNotEqual(__lowerCAmelCase ,0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
_lowerCamelCase : Optional[int] = ["aaaaa bbbbbb", "cccccccccdddddddd"]
_lowerCamelCase : Any = tokenizer.add_tokens(__lowerCAmelCase )
_lowerCamelCase : Tuple = tokenizer.vocab_size
_lowerCamelCase : Union[str, Any] = len(__lowerCAmelCase )
self.assertNotEqual(__lowerCAmelCase ,0 )
self.assertEqual(__lowerCAmelCase ,__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase ,len(__lowerCAmelCase ) )
self.assertEqual(__lowerCAmelCase ,all_size + len(__lowerCAmelCase ) )
_lowerCamelCase : Any = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l" ,add_special_tokens=__lowerCAmelCase )
self.assertGreaterEqual(len(__lowerCAmelCase ) ,4 )
self.assertGreater(tokens[0] ,tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] ,tokenizer.vocab_size - 1 )
_lowerCamelCase : List[Any] = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
_lowerCamelCase : str = tokenizer.add_special_tokens(__lowerCAmelCase )
_lowerCamelCase : int = tokenizer.vocab_size
_lowerCamelCase : str = len(__lowerCAmelCase )
self.assertNotEqual(__lowerCAmelCase ,0 )
self.assertEqual(__lowerCAmelCase ,__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase ,len(__lowerCAmelCase ) )
self.assertEqual(__lowerCAmelCase ,all_size_a + len(__lowerCAmelCase ) )
_lowerCamelCase : Optional[int] = tokenizer.encode(
">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l" ,add_special_tokens=__lowerCAmelCase )
self.assertGreaterEqual(len(__lowerCAmelCase ) ,6 )
self.assertGreater(tokens[0] ,tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] ,tokens[1] )
self.assertGreater(tokens[-3] ,tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] ,tokens[-4] )
self.assertEqual(tokens[0] ,tokenizer.eos_token_id )
self.assertEqual(tokens[-3] ,tokenizer.pad_token_id )
def _lowercase ( self: Any ):
'''simple docstring'''
pass
def _lowercase ( self: Tuple ):
'''simple docstring'''
pass
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : Tuple = self.get_tokenizer()
_lowerCamelCase : Optional[int] = tokenizer.tokenize("This is a test" )
# fmt: off
self.assertListEqual(__lowerCAmelCase ,[SPIECE_UNDERLINE, "T", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "a", SPIECE_UNDERLINE, "t", "e", "s", "t"] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) ,[4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] ,)
_lowerCamelCase : int = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__lowerCAmelCase ,[SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "92000", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] )
_lowerCamelCase : List[str] = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
# fmt: off
self.assertListEqual(__lowerCAmelCase ,[4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
_lowerCamelCase : Any = tokenizer.convert_ids_to_tokens(__lowerCAmelCase )
self.assertListEqual(
__lowerCAmelCase ,[SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "<unk>", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] )
@slow
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = [
"Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
"general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
"Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained "
"models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.",
"BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
"conditioning on both left and right context in all layers.",
"The quick brown fox jumps over the lazy dog.",
]
# fmt: off
_lowerCamelCase : Tuple = {
"input_ids": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="microsoft/speecht5_asr",
            revision="c5ef64c71905caeccde0e4462ef3f9077224c524",
            sequences=sequences,
        )
| style_context_codestyle: 46 | label: 1 |
"""simple docstring"""
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> str:
'''simple docstring'''
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError("iterations must be defined as integers" )
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not number >= 1:
raise ValueError(
"starting number must be\n and integer and be more than 0" )
if not iterations >= 1:
raise ValueError("Iterations must be done more than 0 times to play FizzBuzz" )
_lowerCamelCase : int = ""
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(_lowerCamelCase )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
import doctest
doctest.testmod()
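# Example (as implemented above, each round appends a trailing space):
#
#     fizz_buzz(1, 7)  # -> "1 2 Fizz 4 Buzz Fizz 7 "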
| code_codestyle: 46 |
"""simple docstring"""
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
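# The try/except blocks above implement the optional-dependency pattern: when a
# backend is missing, placeholder "dummy" objects are imported instead, so the
# import itself succeeds and a clear error is raised only when an object is used.
# A hedged consumer sketch (assumes torch is installed):
#   from diffusers import DDPMScheduler
#   scheduler = DDPMScheduler(num_train_timesteps=1_000)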
| 46 | 1 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings( idx ) -> list:
    '''simple docstring'''
    embed = []
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""",
F"""stage{idx}.patch_embed.proj.weight""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""",
F"""stage{idx}.patch_embed.proj.bias""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""",
F"""stage{idx}.patch_embed.norm.weight""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""",
F"""stage{idx}.patch_embed.norm.bias""",
) )
return embed
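# Illustrative output: embeddings(0) yields rename pairs of the form
# (HuggingFace parameter name, original checkpoint parameter name), e.g.
# ("cvt.encoder.stages.0.embedding.convolution_embeddings.projection.weight",
#  "stage0.patch_embed.proj.weight")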
def attention( idx , cnt ) -> list:
    '''simple docstring'''
    attention_weights = []
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj.bias""",
) )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", F"""stage{idx}.blocks.{cnt}.norm1.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", F"""stage{idx}.blocks.{cnt}.norm1.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", F"""stage{idx}.blocks.{cnt}.norm2.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", F"""stage{idx}.blocks.{cnt}.norm2.bias""") )
return attention_weights
def cls_token( idx ) -> list:
    '''simple docstring'''
    token = []
token.append((F"""cvt.encoder.stages.{idx}.cls_token""", "stage2.cls_token") )
return token
def final() -> list:
    '''simple docstring'''
    head = []
head.append(("layernorm.weight", "norm.weight") )
head.append(("layernorm.bias", "norm.bias") )
head.append(("classifier.weight", "head.weight") )
head.append(("classifier.bias", "head.bias") )
return head
def convert_cvt_checkpoint( cvt_model , image_size , cvt_file_name , pytorch_dump_folder_path ) -> None:
    '''simple docstring'''
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id , img_labels_file , repo_type="dataset" ) ) , "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels , id2label=id2label , label2id=label2id )
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/" , 1 )[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/" , 1 )[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]
    model = CvtForImageClassification(config )
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" )
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name , map_location=torch.device("cpu" ) )
    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth ) ):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx )
        list_of_state_dict = list_of_state_dict + embeddings(idx )
        for cnt in range(config.depth[idx] ):
            list_of_state_dict = list_of_state_dict + attention(idx , cnt )
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg )
    for i in range(len(list_of_state_dict ) ):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(huggingface_weights )
    model.save_pretrained(pytorch_dump_folder_path )
    image_processor.save_pretrained(pytorch_dump_folder_path )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
_lowerCAmelCase : int = argparse.ArgumentParser()
parser.add_argument(
'''--cvt_model''',
default='''cvt-w24''',
type=str,
help='''Name of the cvt model you\'d like to convert.''',
)
parser.add_argument(
'''--image_size''',
default=384,
type=int,
help='''Input Image Size''',
)
parser.add_argument(
'''--cvt_file_name''',
default=R'''cvtmodels\CvT-w24-384x384-IN-22k.pth''',
type=str,
    help='''Path to the original CvT checkpoint file.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
_lowerCAmelCase : Tuple = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
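# Example invocation (the script name and paths are illustrative):
#   python convert_cvt_original_checkpoint.py --cvt_model cvt-w24 --image_size 384 \
#       --cvt_file_name cvtmodels/CvT-w24-384x384-IN-22k.pth \
#       --pytorch_dump_folder_path ./cvt-w24-384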
| 46 |
"""simple docstring"""
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class A_ ( _a ):
lowerCAmelCase__ = (DDIMParallelScheduler,)
lowerCAmelCase__ = (('eta', 0.0), ('num_inference_steps', 5_0))
def _lowercase ( self: List[str] ,**__lowerCAmelCase: Tuple ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = {
"num_train_timesteps": 1_000,
"beta_start": 0.00_01,
"beta_end": 0.02,
"beta_schedule": "linear",
"clip_sample": True,
}
config.update(**__lowerCAmelCase )
return config
def _lowercase ( self: int ,**__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : List[Any] = self.scheduler_classes[0]
_lowerCamelCase : str = self.get_scheduler_config(**__lowerCAmelCase )
_lowerCamelCase : Any = scheduler_class(**__lowerCAmelCase )
_lowerCamelCase, _lowerCamelCase : Optional[Any] = 10, 0.0
_lowerCamelCase : List[Any] = self.dummy_model()
_lowerCamelCase : Optional[Any] = self.dummy_sample_deter
scheduler.set_timesteps(__lowerCAmelCase )
for t in scheduler.timesteps:
_lowerCamelCase : Optional[Any] = model(__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : int = scheduler.step(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ).prev_sample
return sample
def _lowercase ( self: List[str] ):
'''simple docstring'''
for timesteps in [100, 500, 1_000]:
self.check_over_configs(num_train_timesteps=__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = self.scheduler_classes[0]
_lowerCamelCase : Dict = self.get_scheduler_config(steps_offset=1 )
_lowerCamelCase : Union[str, Any] = scheduler_class(**__lowerCAmelCase )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps ,torch.LongTensor([801, 601, 401, 201, 1] ) )
def _lowercase ( self: Any ):
'''simple docstring'''
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] ,[0.0_02, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=__lowerCAmelCase ,beta_end=__lowerCAmelCase )
def _lowercase ( self: List[str] ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__lowerCAmelCase )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__lowerCAmelCase )
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=__lowerCAmelCase )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=__lowerCAmelCase )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
self.check_over_configs(thresholding=__lowerCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=__lowerCAmelCase ,prediction_type=__lowerCAmelCase ,sample_max_value=__lowerCAmelCase ,)
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
for t in [1, 10, 49]:
self.check_over_forward(time_step=__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 10, 50] ,[10, 50, 500] ):
self.check_over_forward(time_step=__lowerCAmelCase ,num_inference_steps=__lowerCAmelCase )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
for t, eta in zip([1, 10, 49] ,[0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=__lowerCAmelCase ,eta=__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = self.scheduler_classes[0]
_lowerCamelCase : str = self.get_scheduler_config()
_lowerCamelCase : List[str] = scheduler_class(**__lowerCAmelCase )
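        # The expected values below follow the DDIM posterior variance (Song et al., Eq. 16):
        #   sigma_t^2 = (1 - alpha_bar_prev) / (1 - alpha_bar_t) * (1 - alpha_bar_t / alpha_bar_prev)
        # evaluated at a few (t, prev_t) pairs of the linear beta schedule.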
assert torch.sum(torch.abs(scheduler._get_variance(0 ,0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(420 ,400 ) - 0.1_47_71 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(980 ,960 ) - 0.3_24_60 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0 ,0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ,486 ) - 0.0_09_79 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ,998 ) - 0.02 ) ) < 1e-5
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = self.scheduler_classes[0]
_lowerCamelCase : Union[str, Any] = self.get_scheduler_config()
_lowerCamelCase : str = scheduler_class(**__lowerCAmelCase )
_lowerCamelCase, _lowerCamelCase : Optional[int] = 10, 0.0
scheduler.set_timesteps(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = self.dummy_model()
_lowerCamelCase : Optional[int] = self.dummy_sample_deter
_lowerCamelCase : List[str] = self.dummy_sample_deter + 0.1
_lowerCamelCase : Dict = self.dummy_sample_deter - 0.1
_lowerCamelCase : Union[str, Any] = samplea.shape[0]
_lowerCamelCase : List[Any] = torch.stack([samplea, samplea, samplea] ,dim=0 )
_lowerCamelCase : Dict = torch.arange(__lowerCAmelCase )[0:3, None].repeat(1 ,__lowerCAmelCase )
_lowerCamelCase : str = model(samples.flatten(0 ,1 ) ,timesteps.flatten(0 ,1 ) )
_lowerCamelCase : List[str] = scheduler.batch_step_no_noise(__lowerCAmelCase ,timesteps.flatten(0 ,1 ) ,samples.flatten(0 ,1 ) ,__lowerCAmelCase )
_lowerCamelCase : str = torch.sum(torch.abs(__lowerCAmelCase ) )
_lowerCamelCase : List[Any] = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 11_47.79_04 ) < 1e-2
assert abs(result_mean.item() - 0.49_82 ) < 1e-3
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : Any = self.full_loop()
_lowerCamelCase : Optional[Any] = torch.sum(torch.abs(__lowerCAmelCase ) )
_lowerCamelCase : int = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 1_72.00_67 ) < 1e-2
assert abs(result_mean.item() - 0.22_39_67 ) < 1e-3
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : str = self.full_loop(prediction_type="v_prediction" )
_lowerCamelCase : Optional[int] = torch.sum(torch.abs(__lowerCAmelCase ) )
_lowerCamelCase : List[str] = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 52.53_02 ) < 1e-2
assert abs(result_mean.item() - 0.06_84 ) < 1e-3
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : str = self.full_loop(set_alpha_to_one=__lowerCAmelCase ,beta_start=0.01 )
_lowerCamelCase : List[str] = torch.sum(torch.abs(__lowerCAmelCase ) )
_lowerCamelCase : Dict = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 1_49.82_95 ) < 1e-2
assert abs(result_mean.item() - 0.19_51 ) < 1e-3
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : List[str] = self.full_loop(set_alpha_to_one=__lowerCAmelCase ,beta_start=0.01 )
_lowerCamelCase : int = torch.sum(torch.abs(__lowerCAmelCase ) )
_lowerCamelCase : Union[str, Any] = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 1_49.07_84 ) < 1e-2
assert abs(result_mean.item() - 0.19_41 ) < 1e-3
| 46 | 1 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ ( unittest.TestCase ):
@property
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCamelCase : List[Any] = UNetaDModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=("DownBlock2D", "AttnDownBlock2D") ,up_block_types=("AttnUpBlock2D", "UpBlock2D") ,)
return model
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : str = self.dummy_uncond_unet
_lowerCamelCase : Optional[Any] = ScoreSdeVeScheduler()
_lowerCamelCase : str = ScoreSdeVePipeline(unet=__lowerCAmelCase ,scheduler=__lowerCAmelCase )
sde_ve.to(__lowerCAmelCase )
sde_ve.set_progress_bar_config(disable=__lowerCAmelCase )
_lowerCamelCase : Dict = torch.manual_seed(0 )
_lowerCamelCase : List[Any] = sde_ve(num_inference_steps=2 ,output_type="numpy" ,generator=__lowerCAmelCase ).images
_lowerCamelCase : Any = torch.manual_seed(0 )
_lowerCamelCase : int = sde_ve(num_inference_steps=2 ,output_type="numpy" ,generator=__lowerCAmelCase ,return_dict=__lowerCAmelCase )[
0
]
_lowerCamelCase : Tuple = image[0, -3:, -3:, -1]
_lowerCamelCase : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_lowerCamelCase : List[str] = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class A_ ( unittest.TestCase ):
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : List[Any] = "google/ncsnpp-church-256"
_lowerCamelCase : str = UNetaDModel.from_pretrained(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = ScoreSdeVeScheduler.from_pretrained(__lowerCAmelCase )
_lowerCamelCase : str = ScoreSdeVePipeline(unet=__lowerCAmelCase ,scheduler=__lowerCAmelCase )
sde_ve.to(__lowerCAmelCase )
sde_ve.set_progress_bar_config(disable=__lowerCAmelCase )
_lowerCamelCase : str = torch.manual_seed(0 )
_lowerCamelCase : Tuple = sde_ve(num_inference_steps=10 ,output_type="numpy" ,generator=__lowerCAmelCase ).images
_lowerCamelCase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
_lowerCamelCase : Optional[int] = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 46 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
_lowerCAmelCase : int = {
'''google/bit-50''': '''https://huggingface.co/google/bit-50/resolve/main/config.json''',
}
class A_ ( _a , _a ):
lowerCAmelCase__ = 'bit'
lowerCAmelCase__ = ['preactivation', 'bottleneck']
lowerCAmelCase__ = ['SAME', 'VALID']
    def __init__( self ,num_channels=3 ,embedding_size=64 ,hidden_sizes=[256, 512, 1_024, 2_048] ,depths=[3, 4, 6, 3] ,layer_type="preactivation" ,hidden_act="relu" ,global_padding=None ,num_groups=32 ,drop_path_rate=0.0 ,embedding_dynamic_padding=False ,output_stride=32 ,width_factor=1 ,out_features=None ,out_indices=None ,**kwargs ,):
        '''simple docstring'''
        super().__init__(**kwargs )
        if layer_type not in self.layer_types:
            raise ValueError(F"""layer_type={layer_type} is not one of {','.join(self.layer_types )}""" )
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(F"""Padding strategy {global_padding} not supported""" )
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        self.stage_names = ["stem"] + [F"""stage{idx}""" for idx in range(1 ,len(depths ) + 1 )]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features ,out_indices=out_indices ,stage_names=self.stage_names )
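# A minimal instantiation sketch (argument values are illustrative):
#   config = BitConfig(layer_type="preactivation", global_padding="same")
#   config.global_padding  # -> "SAME": normalized to upper case in __init__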
| 46 | 1 |
"""simple docstring"""
import os
import sys
import unittest
_lowerCAmelCase : Dict = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
_lowerCAmelCase : Union[str, Any] = os.path.join(git_repo_path, '''src''', '''diffusers''')
class A_ ( unittest.TestCase ):
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = find_backend(" if not is_torch_available():" )
self.assertEqual(__lowerCAmelCase ,"torch" )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
_lowerCamelCase : Union[str, Any] = find_backend(" if not (is_torch_available() and is_transformers_available()):" )
self.assertEqual(__lowerCAmelCase ,"torch_and_transformers" )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
_lowerCamelCase : List[str] = find_backend(
" if not (is_torch_available() and is_transformers_available() and is_onnx_available()):" )
self.assertEqual(__lowerCAmelCase ,"torch_and_transformers_and_onnx" )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : List[Any] = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn("torch" ,__lowerCAmelCase )
self.assertIn("torch_and_transformers" ,__lowerCAmelCase )
self.assertIn("flax_and_transformers" ,__lowerCAmelCase )
self.assertIn("torch_and_transformers_and_onnx" ,__lowerCAmelCase )
# Likewise, we can't assert on the exact content of a key
self.assertIn("UNet2DModel" ,objects["torch"] )
self.assertIn("FlaxUNet2DConditionModel" ,objects["flax"] )
self.assertIn("StableDiffusionPipeline" ,objects["torch_and_transformers"] )
self.assertIn("FlaxStableDiffusionPipeline" ,objects["flax_and_transformers"] )
self.assertIn("LMSDiscreteScheduler" ,objects["torch_and_scipy"] )
self.assertIn("OnnxStableDiffusionPipeline" ,objects["torch_and_transformers_and_onnx"] )
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = create_dummy_object("CONSTANT" ,"'torch'" )
self.assertEqual(__lowerCAmelCase ,"\nCONSTANT = None\n" )
_lowerCamelCase : List[str] = create_dummy_object("function" ,"'torch'" )
self.assertEqual(
__lowerCAmelCase ,"\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n" )
_lowerCamelCase : Optional[int] = "\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n"
_lowerCamelCase : Optional[int] = create_dummy_object("FakeClass" ,"'torch'" )
self.assertEqual(__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n"
_lowerCamelCase : Union[str, Any] = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]} )
self.assertEqual(dummy_files["torch"] ,__lowerCAmelCase )
| 46 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : List[str] = {
'''google/vivit-b-16x2-kinetics400''': (
'''https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'''
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class A_ ( _a ):
lowerCAmelCase__ = 'vivit'
    def __init__( self ,image_size=224 ,num_frames=32 ,tubelet_size=[2, 16, 16] ,num_channels=3 ,hidden_size=768 ,num_hidden_layers=12 ,num_attention_heads=12 ,intermediate_size=3_072 ,hidden_act="gelu_fast" ,hidden_dropout_prob=0.0 ,attention_probs_dropout_prob=0.0 ,initializer_range=0.02 ,layer_norm_eps=1e-06 ,qkv_bias=True ,**kwargs ,):
        '''simple docstring'''
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs )
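# Worked example with the defaults above: a [2, 16, 16] tubelet over 32 frames of
# 224x224 video yields (32 / 2) * (224 / 16) * (224 / 16) = 16 * 14 * 14 = 3136 tokens.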
| 46 | 1 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : Any = {'''vocab_file''': '''spiece.model'''}
_lowerCAmelCase : Optional[int] = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
}
}
_lowerCAmelCase : Tuple = {
'''albert-base-v1''': 512,
'''albert-large-v1''': 512,
'''albert-xlarge-v1''': 512,
'''albert-xxlarge-v1''': 512,
'''albert-base-v2''': 512,
'''albert-large-v2''': 512,
'''albert-xlarge-v2''': 512,
'''albert-xxlarge-v2''': 512,
}
_lowerCAmelCase : int = '''▁'''
class A_ ( _a ):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self ,vocab_file ,do_lower_case=True ,remove_space=True ,keep_accents=False ,bos_token="[CLS]" ,eos_token="[SEP]" ,unk_token="<unk>" ,sep_token="[SEP]" ,pad_token="<pad>" ,cls_token="[CLS]" ,mask_token="[MASK]" ,sp_model_kwargs: Optional[Dict[str, Any]] = None ,**kwargs ,):
        '''simple docstring'''
        mask_token = (
            AddedToken(mask_token ,lstrip=True ,rstrip=False )
            if isinstance(mask_token ,str )
            else mask_token
        )
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case ,remove_space=remove_space ,keep_accents=keep_accents ,bos_token=bos_token ,eos_token=eos_token ,unk_token=unk_token ,sep_token=sep_token ,pad_token=pad_token ,cls_token=cls_token ,mask_token=mask_token ,sp_model_kwargs=self.sp_model_kwargs ,**kwargs ,)
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
    @property
    def vocab_size( self ):
        '''simple docstring'''
        return len(self.sp_model )
    def get_vocab( self ):
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ):
        '''simple docstring'''
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__( self ,d ):
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self ,"sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def preprocess_text( self ,inputs ):
        '''simple docstring'''
        if self.remove_space:
            outputs = " ".join(inputs.strip().split() )
        else:
            outputs = inputs
        outputs = outputs.replace("``" ,"\"" ).replace("''" ,"\"" )
        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD" ,outputs )
            outputs = "".join([c for c in outputs if not unicodedata.combining(c )] )
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
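    # Example with the default flags (remove_space=True, keep_accents=False, do_lower_case=True):
    #   tokenizer.preprocess_text("  ``Hello   World''  ")  # -> '"hello world"'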
    def _tokenize( self ,text: str ):
        '''simple docstring'''
        text = self.preprocess_text(text )
        pieces = self.sp_model.encode(text ,out_type=str )
        new_pieces = []
        for piece in pieces:
            if len(piece ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE ,"" ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(cur_pieces )
            else:
                new_pieces.append(piece )
        return new_pieces
    def _convert_token_to_id( self ,token: str ):
        '''simple docstring'''
        return self.sp_model.PieceToId(token )
    def _convert_id_to_token( self ,index: int ):
        '''simple docstring'''
        return self.sp_model.IdToPiece(index )
    def convert_tokens_to_string( self ,tokens ):
        '''simple docstring'''
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def build_inputs_with_special_tokens( self ,token_ids_0: List[int] ,token_ids_1: Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask( self ,token_ids_0: List[int] ,token_ids_1: Optional[List[int]] = None ,already_has_special_tokens: bool = False ):
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 ,token_ids_1=token_ids_1 ,already_has_special_tokens=already_has_special_tokens )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1]
    def create_token_type_ids_from_sequences( self ,token_ids_0: List[int] ,token_ids_1: Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self ,save_directory: str ,filename_prefix: Optional[str] = None ):
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file ,out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file ,"wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
| 46 |
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ ( _a , unittest.TestCase ):
lowerCAmelCase__ = MgpstrTokenizer
lowerCAmelCase__ = False
lowerCAmelCase__ = {}
lowerCAmelCase__ = False
def _lowercase ( self: int ):
'''simple docstring'''
super().setUp()
# fmt: off
_lowerCamelCase : List[Any] = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
# fmt: on
_lowerCamelCase : Optional[Any] = dict(zip(__lowerCAmelCase ,range(len(__lowerCAmelCase ) ) ) )
_lowerCamelCase : List[str] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file ,"w" ,encoding="utf-8" ) as fp:
fp.write(json.dumps(__lowerCAmelCase ) + "\n" )
def _lowercase ( self: List[str] ,**__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname ,**__lowerCAmelCase )
def _lowercase ( self: List[Any] ,__lowerCAmelCase: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : List[Any] = "tester"
_lowerCamelCase : Optional[Any] = "tester"
return input_text, output_text
@unittest.skip("MGP-STR always lower cases letters." )
def _lowercase ( self: Any ):
'''simple docstring'''
pass
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase : List[str] = self.get_tokenizers(do_lower_case=__lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_lowerCamelCase : Tuple = "[SPECIAL_TOKEN]"
tokenizer.add_special_tokens({"cls_token": special_token} )
_lowerCamelCase : Optional[Any] = tokenizer.encode([special_token] ,add_special_tokens=__lowerCAmelCase )
self.assertEqual(len(__lowerCAmelCase ) ,1 )
_lowerCamelCase : int = tokenizer.decode(__lowerCAmelCase ,skip_special_tokens=__lowerCAmelCase )
self.assertTrue(special_token not in decoded )
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase : List[str] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_lowerCamelCase, _lowerCamelCase : List[Any] = self.get_input_output_texts(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = tokenizer.tokenize(__lowerCAmelCase )
_lowerCamelCase : int = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
_lowerCamelCase : List[Any] = tokenizer.encode(__lowerCAmelCase ,add_special_tokens=__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : Dict = tokenizer.convert_ids_to_tokens(__lowerCAmelCase )
self.assertNotEqual(len(__lowerCAmelCase ) ,0 )
_lowerCamelCase : Optional[int] = tokenizer.decode(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase ,__lowerCAmelCase )
self.assertEqual(text_a.replace(" " ,"" ) ,__lowerCAmelCase )
@unittest.skip("MGP-STR tokenizer only handles one sequence." )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" )
def _lowercase ( self: str ):
'''simple docstring'''
pass
| 46 | 1 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCAmelCase : Dict = get_tests_dir('''fixtures/test_sentencepiece_bpe_char.model''')
@require_sentencepiece
@require_tokenizers
class A_ ( _a , unittest.TestCase ):
lowerCAmelCase__ = SpeechTaTokenizer
lowerCAmelCase__ = False
lowerCAmelCase__ = True
def _lowercase ( self: List[Any] ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
_lowerCamelCase : str = SpeechTaTokenizer(__lowerCAmelCase )
_lowerCamelCase : Tuple = AddedToken("<mask>" ,lstrip=__lowerCAmelCase ,rstrip=__lowerCAmelCase )
_lowerCamelCase : Optional[int] = mask_token
tokenizer.add_special_tokens({"mask_token": mask_token} )
tokenizer.add_tokens(["<ctc_blank>"] )
tokenizer.save_pretrained(self.tmpdirname )
def _lowercase ( self: List[str] ,__lowerCAmelCase: str ):
'''simple docstring'''
_lowerCamelCase : Dict = "this is a test"
_lowerCamelCase : Optional[Any] = "this is a test"
return input_text, output_text
    def get_clean_sequence( self ,tokenizer ,with_prefix_space=False ,max_length=20 ,min_length=5 ):
        '''simple docstring'''
        input_text, output_text = self.get_input_output_texts(tokenizer )
        ids = tokenizer.encode(output_text ,add_special_tokens=False )
        text = tokenizer.decode(ids ,clean_up_tokenization_spaces=False )
        return text, ids
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = "<pad>"
_lowerCamelCase : List[str] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCAmelCase ) ,__lowerCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCAmelCase ) ,__lowerCAmelCase )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,"<s>" )
self.assertEqual(vocab_keys[1] ,"<pad>" )
self.assertEqual(vocab_keys[-4] ,"œ" )
self.assertEqual(vocab_keys[-2] ,"<mask>" )
self.assertEqual(vocab_keys[-1] ,"<ctc_blank>" )
self.assertEqual(len(__lowerCAmelCase ) ,81 )
def _lowercase ( self: Dict ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size ,79 )
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = self.get_tokenizers(do_lower_case=__lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_lowerCamelCase : Tuple = tokenizer.vocab_size
_lowerCamelCase : Optional[Any] = len(__lowerCAmelCase )
self.assertNotEqual(__lowerCAmelCase ,0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
_lowerCamelCase : Optional[int] = ["aaaaa bbbbbb", "cccccccccdddddddd"]
_lowerCamelCase : Any = tokenizer.add_tokens(__lowerCAmelCase )
_lowerCamelCase : Tuple = tokenizer.vocab_size
_lowerCamelCase : Union[str, Any] = len(__lowerCAmelCase )
self.assertNotEqual(__lowerCAmelCase ,0 )
self.assertEqual(__lowerCAmelCase ,__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase ,len(__lowerCAmelCase ) )
self.assertEqual(__lowerCAmelCase ,all_size + len(__lowerCAmelCase ) )
_lowerCamelCase : Any = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l" ,add_special_tokens=__lowerCAmelCase )
self.assertGreaterEqual(len(__lowerCAmelCase ) ,4 )
self.assertGreater(tokens[0] ,tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] ,tokenizer.vocab_size - 1 )
_lowerCamelCase : List[Any] = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
_lowerCamelCase : str = tokenizer.add_special_tokens(__lowerCAmelCase )
_lowerCamelCase : int = tokenizer.vocab_size
_lowerCamelCase : str = len(__lowerCAmelCase )
self.assertNotEqual(__lowerCAmelCase ,0 )
self.assertEqual(__lowerCAmelCase ,__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase ,len(__lowerCAmelCase ) )
self.assertEqual(__lowerCAmelCase ,all_size_a + len(__lowerCAmelCase ) )
_lowerCamelCase : Optional[int] = tokenizer.encode(
">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l" ,add_special_tokens=__lowerCAmelCase )
self.assertGreaterEqual(len(__lowerCAmelCase ) ,6 )
self.assertGreater(tokens[0] ,tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] ,tokens[1] )
self.assertGreater(tokens[-3] ,tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] ,tokens[-4] )
self.assertEqual(tokens[0] ,tokenizer.eos_token_id )
self.assertEqual(tokens[-3] ,tokenizer.pad_token_id )
def _lowercase ( self: Any ):
'''simple docstring'''
pass
def _lowercase ( self: Tuple ):
'''simple docstring'''
pass
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : Tuple = self.get_tokenizer()
_lowerCamelCase : Optional[int] = tokenizer.tokenize("This is a test" )
# fmt: off
self.assertListEqual(__lowerCAmelCase ,[SPIECE_UNDERLINE, "T", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "a", SPIECE_UNDERLINE, "t", "e", "s", "t"] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) ,[4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] ,)
_lowerCamelCase : int = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__lowerCAmelCase ,[SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "92000", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] )
_lowerCamelCase : List[str] = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
# fmt: off
self.assertListEqual(__lowerCAmelCase ,[4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
_lowerCamelCase : Any = tokenizer.convert_ids_to_tokens(__lowerCAmelCase )
self.assertListEqual(
__lowerCAmelCase ,[SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "<unk>", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] )
@slow
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = [
"Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
"general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
"Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained "
"models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.",
"BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
"conditioning on both left and right context in all layers.",
"The quick brown fox jumps over the lazy dog.",
]
# fmt: off
_lowerCamelCase : Tuple = {
"input_ids": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCAmelCase ,model_name="microsoft/speecht5_asr" ,revision="c5ef64c71905caeccde0e4462ef3f9077224c524" ,sequences=__lowerCAmelCase ,)
| 46 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
_lowerCAmelCase : str = '''
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior.to("cuda")
>>> prompt = "A red cartoon frog, 4k"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
... )
>>> pipe.to("cuda")
>>> init_image = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/frog.png"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save("red_frog.png")
```
'''
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=8 ) -> Tuple:
'''simple docstring'''
_lowerCamelCase : int = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
_lowerCamelCase : Optional[Any] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
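# Illustrative check of the rounding above (a sketch; `downscale_height_and_width` is the
# name this helper is called by below): pixel sizes map to the movq latent grid, rounding
# non-multiples of scale_factor**2 up first, e.g. with the default scale_factor=8:
# >>> downscale_height_and_width(768, 768, 8) -> (96, 96)   # 768 // 64 == 12, 12 * 8 == 96
# >>> downscale_height_and_width(700, 700, 8) -> (88, 88)   # 700 // 64 == 10 rem 60 -> 11, 11 * 8 == 88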
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=512 , _lowerCamelCase=512 ) -> int:
'''simple docstring'''
_lowerCamelCase : int = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
_lowerCamelCase : Union[str, Any] = np.array(pil_image.convert("RGB" ) )
_lowerCamelCase : Any = arr.astype(np.floataa ) / 1_2_7.5 - 1
_lowerCamelCase : Optional[Any] = np.transpose(_lowerCamelCase , [2, 0, 1] )
_lowerCamelCase : Any = torch.from_numpy(_lowerCamelCase ).unsqueeze(0 )
return image
class A_ ( _a ):
def __init__( self: Any ,__lowerCAmelCase: UNetaDConditionModel ,__lowerCAmelCase: DDPMScheduler ,__lowerCAmelCase: VQModel ,):
'''simple docstring'''
super().__init__()
self.register_modules(
unet=__lowerCAmelCase ,scheduler=__lowerCAmelCase ,movq=__lowerCAmelCase ,)
_lowerCamelCase : List[str] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def _lowercase ( self: Dict ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: Tuple ):
'''simple docstring'''
_lowerCamelCase : int = min(int(num_inference_steps * strength ) ,__lowerCAmelCase )
_lowerCamelCase : Tuple = max(num_inference_steps - init_timestep ,0 )
_lowerCamelCase : Optional[int] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
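# Worked example (a sketch; parameter names inferred from the arithmetic above): with
# num_inference_steps=100 and strength=0.25, init_timestep = min(25, 100) = 25 and
# t_start = 75, so only the last 25 scheduler timesteps are kept for the img2img pass.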
def _lowercase ( self: Optional[int] ,__lowerCAmelCase: Any ,__lowerCAmelCase: Any ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: List[str]=None ):
'''simple docstring'''
if not isinstance(__lowerCAmelCase ,(torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(__lowerCAmelCase )}""" )
_lowerCamelCase : Any = image.to(device=__lowerCAmelCase ,dtype=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = batch_size * num_images_per_prompt
if image.shape[1] == 4:
_lowerCamelCase : List[Any] = image
else:
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ) and len(__lowerCAmelCase ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(__lowerCAmelCase )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
elif isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : List[Any] = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(__lowerCAmelCase )
]
_lowerCamelCase : Tuple = torch.cat(__lowerCAmelCase ,dim=0 )
else:
_lowerCamelCase : int = self.movq.encode(__lowerCAmelCase ).latent_dist.sample(__lowerCAmelCase )
_lowerCamelCase : int = self.movq.config.scaling_factor * init_latents
_lowerCamelCase : Tuple = torch.cat([init_latents] ,dim=0 )
_lowerCamelCase : Optional[int] = init_latents.shape
_lowerCamelCase : int = randn_tensor(__lowerCAmelCase ,generator=__lowerCAmelCase ,device=__lowerCAmelCase ,dtype=__lowerCAmelCase )
# get latents
_lowerCamelCase : Union[str, Any] = self.scheduler.add_noise(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : str = init_latents
return latents
def _lowercase ( self: List[Any] ,__lowerCAmelCase: Optional[int]=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
_lowerCamelCase : str = torch.device(F"""cuda:{gpu_id}""" )
_lowerCamelCase : Dict = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: List[Any] ,__lowerCAmelCase: int=0 ):
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version(">=" ,"0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
_lowerCamelCase : List[str] = torch.device(F"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to("cpu" ,silence_dtype_warnings=__lowerCAmelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_lowerCamelCase : str = None
for cpu_offloaded_model in [self.unet, self.movq]:
_lowerCamelCase, _lowerCamelCase : str = cpu_offload_with_hook(__lowerCAmelCase ,__lowerCAmelCase ,prev_module_hook=__lowerCAmelCase )
# We'll offload the last model manually.
_lowerCamelCase : int = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
if not hasattr(self.unet ,"_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(__lowerCAmelCase ,"_hf_hook" )
and hasattr(module._hf_hook ,"execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(__lowerCAmelCase )
def __call__( self: Dict ,__lowerCAmelCase: Union[torch.FloatTensor, List[torch.FloatTensor]] ,__lowerCAmelCase: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] ,__lowerCAmelCase: Union[torch.FloatTensor, List[torch.FloatTensor]] ,__lowerCAmelCase: int = 512 ,__lowerCAmelCase: int = 512 ,__lowerCAmelCase: int = 100 ,__lowerCAmelCase: float = 4.0 ,__lowerCAmelCase: float = 0.3 ,__lowerCAmelCase: int = 1 ,__lowerCAmelCase: Optional[Union[torch.Generator, List[torch.Generator]]] = None ,__lowerCAmelCase: Optional[str] = "pil" ,__lowerCAmelCase: bool = True ,):
'''simple docstring'''
_lowerCamelCase : Optional[int] = self._execution_device
_lowerCamelCase : Dict = guidance_scale > 1.0
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : int = torch.cat(__lowerCAmelCase ,dim=0 )
_lowerCamelCase : Any = image_embeds.shape[0]
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : str = torch.cat(__lowerCAmelCase ,dim=0 )
if do_classifier_free_guidance:
_lowerCamelCase : List[str] = image_embeds.repeat_interleave(__lowerCAmelCase ,dim=0 )
_lowerCamelCase : Optional[int] = negative_image_embeds.repeat_interleave(__lowerCAmelCase ,dim=0 )
_lowerCamelCase : Optional[Any] = torch.cat([negative_image_embeds, image_embeds] ,dim=0 ).to(dtype=self.unet.dtype ,device=__lowerCAmelCase )
if not isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : Tuple = [image]
if not all(isinstance(__lowerCAmelCase ,(PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
F"""Input is in incorrect format: {[type(__lowerCAmelCase ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
_lowerCamelCase : Union[str, Any] = torch.cat([prepare_image(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ) for i in image] ,dim=0 )
_lowerCamelCase : str = image.to(dtype=image_embeds.dtype ,device=__lowerCAmelCase )
_lowerCamelCase : Tuple = self.movq.encode(__lowerCAmelCase )["latents"]
_lowerCamelCase : List[str] = latents.repeat_interleave(__lowerCAmelCase ,dim=0 )
self.scheduler.set_timesteps(__lowerCAmelCase ,device=__lowerCAmelCase )
_lowerCamelCase, _lowerCamelCase : Optional[Any] = self.get_timesteps(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : Any = timesteps[:1].repeat(batch_size * num_images_per_prompt )
_lowerCamelCase, _lowerCamelCase : Tuple = downscale_height_and_width(__lowerCAmelCase ,__lowerCAmelCase ,self.movq_scale_factor )
_lowerCamelCase : List[Any] = self.prepare_latents(
__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,image_embeds.dtype ,__lowerCAmelCase ,__lowerCAmelCase )
for i, t in enumerate(self.progress_bar(__lowerCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
_lowerCamelCase : Union[str, Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCamelCase : List[str] = {"image_embeds": image_embeds}
_lowerCamelCase : Tuple = self.unet(
sample=__lowerCAmelCase ,timestep=__lowerCAmelCase ,encoder_hidden_states=__lowerCAmelCase ,added_cond_kwargs=__lowerCAmelCase ,return_dict=__lowerCAmelCase ,)[0]
if do_classifier_free_guidance:
_lowerCamelCase, _lowerCamelCase : Tuple = noise_pred.split(latents.shape[1] ,dim=1 )
_lowerCamelCase, _lowerCamelCase : Dict = noise_pred.chunk(2 )
_lowerCamelCase, _lowerCamelCase : str = variance_pred.chunk(2 )
_lowerCamelCase : Tuple = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
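# Classifier-free guidance recap: the negative and conditioned predictions were batched
# together earlier, so after chunking, guided = uncond + guidance_scale * (cond - uncond);
# guidance_scale = 1.0 reproduces the plain conditional prediction, while larger values
# push the sample harder toward the image-embedding condition.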
_lowerCamelCase : Any = torch.cat([noise_pred, variance_pred_text] ,dim=1 )
if not (
hasattr(self.scheduler.config ,"variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_lowerCamelCase, _lowerCamelCase : Union[str, Any] = noise_pred.split(latents.shape[1] ,dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_lowerCamelCase : Optional[int] = self.scheduler.step(
__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,generator=__lowerCAmelCase ,)[0]
# post-processing
_lowerCamelCase : Optional[int] = self.movq.decode(__lowerCAmelCase ,force_not_quantize=__lowerCAmelCase )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
_lowerCamelCase : Optional[int] = image * 0.5 + 0.5
_lowerCamelCase : str = image.clamp(0 ,1 )
_lowerCamelCase : Optional[int] = image.cpu().permute(0 ,2 ,3 ,1 ).float().numpy()
if output_type == "pil":
_lowerCamelCase : str = self.numpy_to_pil(__lowerCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__lowerCAmelCase )
| 46 | 1 |
"""simple docstring"""
import math
def lowerCamelCase_( _lowerCamelCase ) -> list:
'''simple docstring'''
_lowerCamelCase : List[str] = [True] * n
_lowerCamelCase : Optional[int] = False
_lowerCamelCase : List[str] = False
_lowerCamelCase : str = True
for i in range(3 , int(n**0.5 + 1 ) , 2 ):
_lowerCamelCase : List[Any] = i * 2
while index < n:
_lowerCamelCase : List[str] = False
_lowerCamelCase : Optional[Any] = index + i
_lowerCamelCase : Optional[int] = [2]
for i in range(3 , _lowerCamelCase , 2 ):
if is_prime[i]:
primes.append(_lowerCamelCase )
return primes
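# Doctest-style sketch (hedged: the sieve's definition name is mangled above, but it is
# invoked as prime_sieve below): multiples of each odd i <= sqrt(n) are crossed off, so
# >>> prime_sieve(10)
# [2, 3, 5, 7]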
def lowerCamelCase_( _lowerCamelCase = 999966663333 ) -> int:
'''simple docstring'''
_lowerCamelCase : Optional[Any] = math.floor(math.sqrt(_lowerCamelCase ) ) + 100
_lowerCamelCase : str = prime_sieve(_lowerCamelCase )
_lowerCamelCase : Dict = 0
_lowerCamelCase : List[str] = 0
_lowerCamelCase : Optional[Any] = primes[prime_index]
while (last_prime**2) <= limit:
_lowerCamelCase : Tuple = primes[prime_index + 1]
_lowerCamelCase : Dict = last_prime**2
_lowerCamelCase : Dict = next_prime**2
# Get numbers divisible by lps(current)
_lowerCamelCase : List[Any] = lower_bound + last_prime
while upper_bound > current <= limit:
matches_sum += current
current += last_prime
# Reset the upper_bound
while (upper_bound - next_prime) > limit:
upper_bound -= next_prime
# Add the numbers divisible by ups(current)
_lowerCamelCase : int = upper_bound - next_prime
while current > lower_bound:
matches_sum += current
current -= next_prime
# Remove the numbers divisible by both ups and lps
_lowerCamelCase : Union[str, Any] = 0
while upper_bound > current <= limit:
if current <= lower_bound:
# Increment the current number
current += last_prime * next_prime
continue
if current > limit:
break
# Remove twice since it was added by both ups and lps
matches_sum -= current * 2
# Increment the current number
current += last_prime * next_prime
# Setup for next pair
_lowerCamelCase : List[str] = next_prime
prime_index += 1
return matches_sum
if __name__ == "__main__":
print(solution())
| 46 |
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def lowerCamelCase_( ) -> None:
'''simple docstring'''
print("Making key files..." )
make_key_files("rsa" , 1024 )
print("Key files generation successful." )
def lowerCamelCase_( _lowerCamelCase ) -> tuple[tuple[int, int], tuple[int, int]]:
'''simple docstring'''
print("Generating prime p..." )
_lowerCamelCase : List[str] = rabinMiller.generate_large_prime(_lowerCamelCase )
print("Generating prime q..." )
_lowerCamelCase : Tuple = rabinMiller.generate_large_prime(_lowerCamelCase )
_lowerCamelCase : Dict = p * q
print("Generating e that is relatively prime to (p - 1) * (q - 1)..." )
while True:
_lowerCamelCase : Tuple = random.randrange(2 ** (key_size - 1) , 2 ** (key_size) )
if cryptoMath.gcd(_lowerCamelCase , (p - 1) * (q - 1) ) == 1:
break
print("Calculating d that is mod inverse of e..." )
_lowerCamelCase : str = cryptoMath.find_mod_inverse(_lowerCamelCase , (p - 1) * (q - 1) )
_lowerCamelCase : Dict = (n, e)
_lowerCamelCase : Dict = (n, d)
return (public_key, private_key)
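# Sanity sketch (hedged: toy numbers, not produced by this function): the returned pair
# satisfies pow(pow(m, e, n), d, n) == m for any m < n, because e*d == 1 (mod (p-1)*(q-1)).
# With p=61, q=53, e=17 one gets n=3233, d=2753, and pow(pow(65, 17, 3233), 2753, 3233) == 65.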
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> None:
'''simple docstring'''
if os.path.exists(F"""{name}_pubkey.txt""" ) or os.path.exists(F"""{name}_privkey.txt""" ):
print("\nWARNING:" )
print(
F"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
"Use a different name or delete these files and re-run this program." )
sys.exit()
_lowerCamelCase, _lowerCamelCase : Dict = generate_key(_lowerCamelCase )
print(F"""\nWriting public key to file {name}_pubkey.txt...""" )
with open(F"""{name}_pubkey.txt""" , "w" ) as out_file:
out_file.write(F"""{key_size},{public_key[0]},{public_key[1]}""" )
print(F"""Writing private key to file {name}_privkey.txt...""" )
with open(F"""{name}_privkey.txt""" , "w" ) as out_file:
out_file.write(F"""{key_size},{private_key[0]},{private_key[1]}""" )
if __name__ == "__main__":
main()
| 46 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class A_ ( metaclass=_a ):
lowerCAmelCase__ = ['torch', 'transformers', 'onnx']
def __init__( self: List[Any] ,*__lowerCAmelCase: List[Any] ,**__lowerCAmelCase: List[Any] ):
'''simple docstring'''
requires_backends(self ,["torch", "transformers", "onnx"] )
@classmethod
def _lowercase ( cls: Dict ,*__lowerCAmelCase: Tuple ,**__lowerCAmelCase: Dict ):
'''simple docstring'''
requires_backends(cls ,["torch", "transformers", "onnx"] )
@classmethod
def _lowercase ( cls: Optional[int] ,*__lowerCAmelCase: Tuple ,**__lowerCAmelCase: Any ):
'''simple docstring'''
requires_backends(cls ,["torch", "transformers", "onnx"] )
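# Behavioural note (a sketch; the real class names were mangled to A_): each dummy raises
# an ImportError from requires_backends as soon as it is instantiated or its classmethods
# (upstream these are from_config and from_pretrained) are touched, so a missing
# torch/transformers/onnx backend fails fast with an install hint instead of erroring
# deep inside a forward pass.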
class A_ ( metaclass=_a ):
lowerCAmelCase__ = ['torch', 'transformers', 'onnx']
def __init__( self: Union[str, Any] ,*__lowerCAmelCase: Tuple ,**__lowerCAmelCase: Optional[int] ):
'''simple docstring'''
requires_backends(self ,["torch", "transformers", "onnx"] )
@classmethod
def _lowercase ( cls: int ,*__lowerCAmelCase: List[str] ,**__lowerCAmelCase: int ):
'''simple docstring'''
requires_backends(cls ,["torch", "transformers", "onnx"] )
@classmethod
def _lowercase ( cls: Optional[Any] ,*__lowerCAmelCase: int ,**__lowerCAmelCase: str ):
'''simple docstring'''
requires_backends(cls ,["torch", "transformers", "onnx"] )
class A_ ( metaclass=_a ):
lowerCAmelCase__ = ['torch', 'transformers', 'onnx']
def __init__( self: List[Any] ,*__lowerCAmelCase: str ,**__lowerCAmelCase: List[str] ):
'''simple docstring'''
requires_backends(self ,["torch", "transformers", "onnx"] )
@classmethod
def _lowercase ( cls: Union[str, Any] ,*__lowerCAmelCase: Union[str, Any] ,**__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
requires_backends(cls ,["torch", "transformers", "onnx"] )
@classmethod
def _lowercase ( cls: Union[str, Any] ,*__lowerCAmelCase: str ,**__lowerCAmelCase: Union[str, Any] ):
'''simple docstring'''
requires_backends(cls ,["torch", "transformers", "onnx"] )
class A_ ( metaclass=_a ):
lowerCAmelCase__ = ['torch', 'transformers', 'onnx']
def __init__( self: Optional[Any] ,*__lowerCAmelCase: Union[str, Any] ,**__lowerCAmelCase: str ):
'''simple docstring'''
requires_backends(self ,["torch", "transformers", "onnx"] )
@classmethod
def _lowercase ( cls: List[str] ,*__lowerCAmelCase: Union[str, Any] ,**__lowerCAmelCase: Tuple ):
'''simple docstring'''
requires_backends(cls ,["torch", "transformers", "onnx"] )
@classmethod
def _lowercase ( cls: Tuple ,*__lowerCAmelCase: int ,**__lowerCAmelCase: List[str] ):
'''simple docstring'''
requires_backends(cls ,["torch", "transformers", "onnx"] )
class A_ ( metaclass=_a ):
lowerCAmelCase__ = ['torch', 'transformers', 'onnx']
def __init__( self: Optional[Any] ,*__lowerCAmelCase: Any ,**__lowerCAmelCase: Tuple ):
'''simple docstring'''
requires_backends(self ,["torch", "transformers", "onnx"] )
@classmethod
def _lowercase ( cls: Union[str, Any] ,*__lowerCAmelCase: Union[str, Any] ,**__lowerCAmelCase: Optional[int] ):
'''simple docstring'''
requires_backends(cls ,["torch", "transformers", "onnx"] )
@classmethod
def _lowercase ( cls: Dict ,*__lowerCAmelCase: int ,**__lowerCAmelCase: List[str] ):
'''simple docstring'''
requires_backends(cls ,["torch", "transformers", "onnx"] )
class A_ ( metaclass=_a ):
lowerCAmelCase__ = ['torch', 'transformers', 'onnx']
def __init__( self: Optional[Any] ,*__lowerCAmelCase: List[Any] ,**__lowerCAmelCase: Dict ):
'''simple docstring'''
requires_backends(self ,["torch", "transformers", "onnx"] )
@classmethod
def _lowercase ( cls: Dict ,*__lowerCAmelCase: Dict ,**__lowerCAmelCase: int ):
'''simple docstring'''
requires_backends(cls ,["torch", "transformers", "onnx"] )
@classmethod
def _lowercase ( cls: str ,*__lowerCAmelCase: Optional[Any] ,**__lowerCAmelCase: Optional[int] ):
'''simple docstring'''
requires_backends(cls ,["torch", "transformers", "onnx"] )
| 46 |
"""simple docstring"""
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class A_ :
def __init__( self: Dict ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: int=13 ,__lowerCAmelCase: List[str]=30 ,__lowerCAmelCase: List[str]=2 ,__lowerCAmelCase: Dict=3 ,__lowerCAmelCase: Tuple=True ,__lowerCAmelCase: List[str]=True ,__lowerCAmelCase: Optional[Any]=32 ,__lowerCAmelCase: List[Any]=5 ,__lowerCAmelCase: int=4 ,__lowerCAmelCase: Optional[int]=37 ,__lowerCAmelCase: Dict="gelu" ,__lowerCAmelCase: str=0.1 ,__lowerCAmelCase: List[str]=0.1 ,__lowerCAmelCase: Optional[Any]=10 ,__lowerCAmelCase: List[str]=0.02 ,__lowerCAmelCase: Union[str, Any]=3 ,__lowerCAmelCase: Tuple=0.6 ,__lowerCAmelCase: Dict=None ,):
'''simple docstring'''
_lowerCamelCase : Optional[int] = parent
_lowerCamelCase : Any = batch_size
_lowerCamelCase : Any = image_size
_lowerCamelCase : List[str] = patch_size
_lowerCamelCase : Union[str, Any] = num_channels
_lowerCamelCase : List[str] = is_training
_lowerCamelCase : str = use_labels
_lowerCamelCase : List[Any] = hidden_size
_lowerCamelCase : Union[str, Any] = num_hidden_layers
_lowerCamelCase : Optional[int] = num_attention_heads
_lowerCamelCase : Optional[Any] = intermediate_size
_lowerCamelCase : Optional[int] = hidden_act
_lowerCamelCase : Union[str, Any] = hidden_dropout_prob
_lowerCamelCase : Any = attention_probs_dropout_prob
_lowerCamelCase : str = type_sequence_label_size
_lowerCamelCase : int = initializer_range
_lowerCamelCase : Dict = mask_ratio
_lowerCamelCase : List[Any] = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
_lowerCamelCase : str = (image_size // patch_size) ** 2
_lowerCamelCase : Dict = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
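# Worked example with this tester's defaults: image_size=30, patch_size=2 gives
# 15 * 15 = 225 patches; with mask_ratio=0.6 the visible sequence length is
# ceil((1 - 0.6) * (225 + 1)) = ceil(90.4) = 91 tokens, the [CLS] token included.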
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase : int = None
if self.use_labels:
_lowerCamelCase : Any = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_lowerCamelCase : str = self.get_config()
return config, pixel_values, labels
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
return ViTMAEConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=__lowerCAmelCase ,initializer_range=self.initializer_range ,mask_ratio=self.mask_ratio ,)
def _lowercase ( self: Any ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: Dict ):
'''simple docstring'''
_lowerCamelCase : Any = ViTMAEModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self: List[str] ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: int ,__lowerCAmelCase: Dict ):
'''simple docstring'''
_lowerCamelCase : List[Any] = ViTMAEForPreTraining(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Dict = model(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = (self.image_size // self.patch_size) ** 2
_lowerCamelCase : Optional[int] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
_lowerCamelCase : str = 1
_lowerCamelCase : Tuple = ViTMAEForPreTraining(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase )
_lowerCamelCase : Any = self.patch_size**2
self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : int = self.prepare_config_and_inputs()
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : int = config_and_inputs
_lowerCamelCase : Dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class A_ ( _a , _a , unittest.TestCase ):
lowerCAmelCase__ = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
lowerCAmelCase__ = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : int = ViTMAEModelTester(self )
_lowerCamelCase : List[str] = ConfigTester(self ,config_class=__lowerCAmelCase ,has_text_modality=__lowerCAmelCase ,hidden_size=37 )
def _lowercase ( self: List[str] ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
pass
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Optional[Any] = model_class(__lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
_lowerCamelCase : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCAmelCase ,nn.Linear ) )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Dict = model_class(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : Optional[Any] = [*signature.parameters.keys()]
_lowerCamelCase : Dict = ["pixel_values"]
self.assertListEqual(arg_names[:1] ,__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__lowerCAmelCase )
def _lowercase ( self: Any ,__lowerCAmelCase: int ,__lowerCAmelCase: Dict ,__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
np.random.seed(2 )
_lowerCamelCase : Optional[Any] = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
_lowerCamelCase : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_lowerCamelCase : Union[str, Any] = torch.from_numpy(__lowerCAmelCase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
_lowerCamelCase : Dict = pt_noise
super().check_pt_tf_models(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : List[str] = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
_lowerCamelCase : int = model(**self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ) )
_lowerCamelCase : Any = outputs[0].cpu().numpy()
_lowerCamelCase : Union[str, Any] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__lowerCAmelCase )
_lowerCamelCase : str = model_class.from_pretrained(__lowerCAmelCase )
model.to(__lowerCAmelCase )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
_lowerCamelCase : Dict = model(**self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ) )
# Make sure we don't have nans
_lowerCamelCase : Union[str, Any] = after_outputs[0].cpu().numpy()
_lowerCamelCase : Union[str, Any] = 0
_lowerCamelCase : List[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__lowerCAmelCase ,1e-5 )
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def _lowercase ( self: str ):
'''simple docstring'''
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def _lowercase ( self: Tuple ):
'''simple docstring'''
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def _lowercase ( self: int ):
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _lowercase ( self: Dict ):
'''simple docstring'''
pass
@slow
def _lowercase ( self: Dict ):
'''simple docstring'''
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Optional[Any] = ViTMAEModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def lowerCamelCase_( ) -> str:
'''simple docstring'''
_lowerCamelCase : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class A_ ( unittest.TestCase ):
@cached_property
def _lowercase ( self: str ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
def _lowercase ( self: int ):
'''simple docstring'''
np.random.seed(2 )
_lowerCamelCase : List[str] = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" ).to(__lowerCAmelCase )
_lowerCamelCase : int = self.default_image_processor
_lowerCamelCase : int = prepare_img()
_lowerCamelCase : Tuple = image_processor(images=__lowerCAmelCase ,return_tensors="pt" ).to(__lowerCAmelCase )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
_lowerCamelCase : Tuple = ViTMAEConfig()
_lowerCamelCase : Union[str, Any] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
_lowerCamelCase : Optional[Any] = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
_lowerCamelCase : Dict = model(**__lowerCAmelCase ,noise=torch.from_numpy(__lowerCAmelCase ).to(device=__lowerCAmelCase ) )
# verify the logits
_lowerCamelCase : Any = torch.Size((1, 196, 768) )
self.assertEqual(outputs.logits.shape ,__lowerCAmelCase )
_lowerCamelCase : Tuple = torch.tensor(
[[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] ,expected_slice.to(__lowerCAmelCase ) ,atol=1e-4 ) )
| 46 | 1 |
"""simple docstring"""
import os
def lowerCamelCase_( ) -> List[Any]:
'''simple docstring'''
with open(os.path.dirname(__file__ ) + "/p022_names.txt" ) as file:
_lowerCamelCase : List[str] = str(file.readlines()[0] )
_lowerCamelCase : Optional[Any] = names.replace("\"" , "" ).split("," )
names.sort()
_lowerCamelCase : Optional[Any] = 0
_lowerCamelCase : Tuple = 0
for i, name in enumerate(_lowerCamelCase ):
for letter in name:
name_score += ord(_lowerCamelCase ) - 64
total_score += (i + 1) * name_score
_lowerCamelCase : int = 0
return total_score
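# Worked example (from the Project Euler 22 statement, not computed by the mangled code
# above): "COLIN" scores 3 + 15 + 12 + 9 + 14 = 53 and sits at sorted position 938, so
# it contributes 938 * 53 = 49714 to the returned total.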
if __name__ == "__main__":
print(solution())
| 46 |
"""simple docstring"""
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
_lowerCAmelCase : List[str] = 10
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int:
'''simple docstring'''
for i in range(_lowerCamelCase , _lowerCamelCase ):
if array[i] == target:
return i
return -1
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> int:
'''simple docstring'''
_lowerCamelCase : List[str] = 0
_lowerCamelCase : Any = len(_lowerCamelCase )
while left <= right:
if right - left < precision:
return lin_search(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
_lowerCamelCase : str = (left + right) // 3 + 1
_lowerCamelCase : List[str] = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
_lowerCamelCase : Union[str, Any] = one_third - 1
elif array[two_third] < target:
_lowerCamelCase : Any = two_third + 1
else:
_lowerCamelCase : List[str] = one_third + 1
_lowerCamelCase : int = two_third - 1
else:
return -1
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int:
'''simple docstring'''
if left < right:
if right - left < precision:
return lin_search(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
_lowerCamelCase : Tuple = (left + right) // 3 + 1
_lowerCamelCase : Optional[Any] = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
return rec_ternary_search(_lowerCamelCase , one_third - 1 , _lowerCamelCase , _lowerCamelCase )
elif array[two_third] < target:
return rec_ternary_search(two_third + 1 , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
else:
return rec_ternary_search(one_third + 1 , two_third - 1 , _lowerCamelCase , _lowerCamelCase )
else:
return -1
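# Usage sketch (relies on the module-level precision constant above): both searches
# expect a sorted list and return a matching index or -1. Any segment shorter than
# `precision` (10) falls back to lin_search, so a 6-element list is resolved entirely
# by the linear scan rather than by ternary splitting.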
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCAmelCase : Optional[Any] = input('''Enter numbers separated by comma:\n''').strip()
_lowerCAmelCase : Optional[Any] = [int(item.strip()) for item in user_input.split(''',''')]
assert collection == sorted(collection), f"List must be ordered.\n{collection}."
_lowerCAmelCase : Any = int(input('''Enter the number to be found in the list:\n''').strip())
_lowerCAmelCase : Union[str, Any] = ite_ternary_search(collection, target)
_lowerCAmelCase : str = rec_ternary_search(0, len(collection) - 1, collection, target)
if resulta != -1:
print(f'''Iterative search: {target} found at positions: {resulta}''')
print(f'''Recursive search: {target} found at positions: {resultb}''')  # report the recursive result, not the iterative one twice
else:
print('''Not found''')
| 46 | 1 |
"""simple docstring"""
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
class A_ ( _a ):
def __init__( self: List[Any] ,**__lowerCAmelCase: int ):
'''simple docstring'''
requires_backends(self ,["bs4"] )
super().__init__(**__lowerCAmelCase )
def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: Any ):
'''simple docstring'''
_lowerCamelCase : str = []
_lowerCamelCase : Tuple = []
_lowerCamelCase : Any = element if element.name else element.parent
for parent in child.parents: # type: bs4.element.Tag
_lowerCamelCase : List[str] = parent.find_all(child.name ,recursive=__lowerCAmelCase )
xpath_tags.append(child.name )
xpath_subscripts.append(
0 if 1 == len(__lowerCAmelCase ) else next(i for i, s in enumerate(__lowerCAmelCase ,1 ) if s is child ) )
_lowerCamelCase : List[Any] = parent
xpath_tags.reverse()
xpath_subscripts.reverse()
return xpath_tags, xpath_subscripts
def _lowercase ( self: List[str] ,__lowerCAmelCase: List[str] ):
'''simple docstring'''
_lowerCamelCase : List[Any] = BeautifulSoup(__lowerCAmelCase ,"html.parser" )
_lowerCamelCase : List[Any] = []
_lowerCamelCase : List[str] = []
_lowerCamelCase : List[Any] = []
for element in html_code.descendants:
if type(__lowerCAmelCase ) == bsa.element.NavigableString:
if type(element.parent ) != bsa.element.Tag:
continue
_lowerCamelCase : List[str] = html.unescape(__lowerCAmelCase ).strip()
if not text_in_this_tag:
continue
all_doc_strings.append(__lowerCAmelCase )
_lowerCamelCase, _lowerCamelCase : Optional[Any] = self.xpath_soup(__lowerCAmelCase )
stringaxtag_seq.append(__lowerCAmelCase )
stringaxsubs_seq.append(__lowerCAmelCase )
if len(__lowerCAmelCase ) != len(__lowerCAmelCase ):
raise ValueError("Number of doc strings and xtags does not correspond" )
if len(__lowerCAmelCase ) != len(__lowerCAmelCase ):
raise ValueError("Number of doc strings and xsubs does not correspond" )
return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
def _lowercase ( self: Tuple ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : Tuple = ""
for tagname, subs in zip(__lowerCAmelCase ,__lowerCAmelCase ):
xpath += F"""/{tagname}"""
if subs != 0:
xpath += F"""[{subs}]"""
return xpath
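# Example of the xpath built here (hypothetical tag data): tag names
# ["html", "body", "div", "p"] with subscripts [0, 0, 3, 0] yield "/html/body/div[3]/p";
# a zero subscript means the tag had no same-name siblings, so no index is emitted.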
def __call__( self: List[str] ,__lowerCAmelCase: List[Any] ):
'''simple docstring'''
_lowerCamelCase : int = False
# Check that strings has a valid type
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : Dict = True
elif isinstance(__lowerCAmelCase ,(list, tuple) ):
if len(__lowerCAmelCase ) == 0 or isinstance(html_strings[0] ,__lowerCAmelCase ):
_lowerCamelCase : Tuple = True
if not valid_strings:
raise ValueError(
"HTML strings must of type `str`, `List[str]` (batch of examples), "
F"""but is of type {type(__lowerCAmelCase )}.""" )
_lowerCamelCase : Optional[int] = bool(isinstance(__lowerCAmelCase ,(list, tuple) ) and (isinstance(html_strings[0] ,__lowerCAmelCase )) )
if not is_batched:
_lowerCamelCase : Dict = [html_strings]
# Get nodes + xpaths
_lowerCamelCase : List[Any] = []
_lowerCamelCase : List[Any] = []
for html_string in html_strings:
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : List[Any] = self.get_three_from_single(__lowerCAmelCase )
nodes.append(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = []
for node, tag_list, sub_list in zip(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : Tuple = self.construct_xpath(__lowerCAmelCase ,__lowerCAmelCase )
xpath_strings.append(__lowerCAmelCase )
xpaths.append(__lowerCAmelCase )
# return as Dict
_lowerCamelCase : Optional[Any] = {"nodes": nodes, "xpaths": xpaths}
_lowerCamelCase : Union[str, Any] = BatchFeature(data=__lowerCAmelCase ,tensor_type=__lowerCAmelCase )
return encoded_inputs
| 46 |
"""simple docstring"""
def lowerCamelCase_( _lowerCamelCase = 100 ) -> int:
'''simple docstring'''
_lowerCamelCase : List[str] = set()
_lowerCamelCase : Optional[Any] = 0
_lowerCamelCase : Optional[int] = n + 1 # maximum limit
for a in range(2 , _lowerCamelCase ):
for b in range(2 , _lowerCamelCase ):
_lowerCamelCase : List[str] = a**b # calculates the current power
collect_powers.add(_lowerCamelCase ) # adds the result to the set
return len(_lowerCamelCase )
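# Worked example (matches the Project Euler 29 statement): for n = 5 the powers a**b with
# 2 <= a, b <= 5 produce 16 terms but only 15 distinct values (2**4 == 4**2 == 16), so
# the function returns 15.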
if __name__ == "__main__":
print('''Number of terms ''', solution(int(str(input()).strip())))
| 46 | 1 |
"""simple docstring"""
from typing import Dict, Optional
import numpy as np
import datasets
_lowerCAmelCase : Dict = '''
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
'''
_lowerCAmelCase : Optional[Any] = '''
Args:
predictions (`List[ndarray]`):
List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
references (`List[ndarray]`):
List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
num_labels (`int`):
Number of classes (categories).
ignore_index (`int`):
Index that will be ignored during evaluation.
nan_to_num (`int`, *optional*):
If specified, NaN values will be replaced by the number defined by the user.
label_map (`dict`, *optional*):
If specified, dictionary mapping old label indices to new label indices.
reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
Returns:
`Dict[str, float | ndarray]` comprising various elements:
- *mean_iou* (`float`):
Mean Intersection-over-Union (IoU averaged over all categories).
- *mean_accuracy* (`float`):
Mean accuracy (averaged over all categories).
- *overall_accuracy* (`float`):
Overall accuracy on all images.
- *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
Per category accuracy.
- *per_category_iou* (`ndarray` of shape `(num_labels,)`):
Per category IoU.
Examples:
>>> import numpy as np
>>> mean_iou = datasets.load_metric("mean_iou")
>>> # suppose one has 3 different segmentation maps predicted
>>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
>>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
>>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
>>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
>>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
>>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
>>> predicted = [predicted_1, predicted_2, predicted_3]
>>> ground_truth = [actual_1, actual_2, actual_3]
>>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
'''
_lowerCAmelCase : Optional[int] = '''\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}'''
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = False , ) -> List[str]:
'''simple docstring'''
if label_map is not None:
for old_id, new_id in label_map.items():
_lowerCamelCase : Union[str, Any] = new_id
# turn into Numpy arrays
_lowerCamelCase : Dict = np.array(_lowerCamelCase )
_lowerCamelCase : str = np.array(_lowerCamelCase )
if reduce_labels:
_lowerCamelCase : Union[str, Any] = 255
_lowerCamelCase : Optional[Any] = label - 1
_lowerCamelCase : Optional[Any] = 255
_lowerCamelCase : List[Any] = label != ignore_index
_lowerCamelCase : int = np.not_equal(_lowerCamelCase , _lowerCamelCase )
_lowerCamelCase : Union[str, Any] = pred_label[mask]
_lowerCamelCase : List[str] = np.array(_lowerCamelCase )[mask]
_lowerCamelCase : Union[str, Any] = pred_label[pred_label == label]
_lowerCamelCase : int = np.histogram(_lowerCamelCase , bins=_lowerCamelCase , range=(0, num_labels - 1) )[0]
_lowerCamelCase : List[Any] = np.histogram(_lowerCamelCase , bins=_lowerCamelCase , range=(0, num_labels - 1) )[0]
_lowerCamelCase : Optional[Any] = np.histogram(_lowerCamelCase , bins=_lowerCamelCase , range=(0, num_labels - 1) )[0]
_lowerCamelCase : List[str] = area_pred_label + area_label - area_intersect
return area_intersect, area_union, area_pred_label, area_label
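# Tiny worked example (hypothetical 2x2 maps, num_labels=2, nothing ignored):
# pred = [[0, 1], [1, 1]] and label = [[0, 1], [0, 1]] give class 0 intersection 1 and
# union 2 (IoU 0.5), class 1 intersection 2 and union 3 (IoU 2/3).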
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = False , ) -> Optional[Any]:
'''simple docstring'''
_lowerCamelCase : List[Any] = np.zeros((num_labels,) , dtype=np.floataa )
_lowerCamelCase : List[Any] = np.zeros((num_labels,) , dtype=np.floataa )
_lowerCamelCase : int = np.zeros((num_labels,) , dtype=np.floataa )
_lowerCamelCase : Union[str, Any] = np.zeros((num_labels,) , dtype=np.floataa )
for result, gt_seg_map in zip(_lowerCamelCase , _lowerCamelCase ):
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Dict = intersect_and_union(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
total_area_intersect += area_intersect
total_area_union += area_union
total_area_pred_label += area_pred_label
total_area_label += area_label
return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = False , ) -> Union[str, Any]:
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : List[Any] = total_intersect_and_union(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# compute metrics
_lowerCamelCase : List[Any] = {}
_lowerCamelCase : Tuple = total_area_intersect.sum() / total_area_label.sum()
_lowerCamelCase : List[Any] = total_area_intersect / total_area_union
_lowerCamelCase : List[str] = total_area_intersect / total_area_label
_lowerCamelCase : List[Any] = np.nanmean(_lowerCamelCase )
_lowerCamelCase : Dict = np.nanmean(_lowerCamelCase )
_lowerCamelCase : Union[str, Any] = all_acc
_lowerCamelCase : List[Any] = iou
_lowerCamelCase : Union[str, Any] = acc
if nan_to_num is not None:
_lowerCamelCase : str = {metric: np.nan_to_num(_lowerCamelCase , nan=_lowerCamelCase ) for metric, metric_value in metrics.items()}
return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ ( datasets.Metric ):
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
"predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16" ) ) ),
"references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16" ) ) ),
} ) ,reference_urls=[
"https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
] ,)
def _lowercase ( self: Any ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: int ,__lowerCAmelCase: bool ,__lowerCAmelCase: Optional[int] = None ,__lowerCAmelCase: Optional[Dict[int, int]] = None ,__lowerCAmelCase: bool = False ,):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = mean_iou(
results=__lowerCAmelCase ,gt_seg_maps=__lowerCAmelCase ,num_labels=__lowerCAmelCase ,ignore_index=__lowerCAmelCase ,nan_to_num=__lowerCAmelCase ,label_map=__lowerCAmelCase ,reduce_labels=__lowerCAmelCase ,)
return iou_result
| 46 |
"""simple docstring"""
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
# TODO Update this
_lowerCAmelCase : Optional[Any] = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class A_ ( _a ):
lowerCAmelCase__ = 'esm'
def __init__( self: str ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: str=None ,__lowerCAmelCase: Optional[int]=768 ,__lowerCAmelCase: Any=12 ,__lowerCAmelCase: str=12 ,__lowerCAmelCase: List[Any]=3_072 ,__lowerCAmelCase: int=0.1 ,__lowerCAmelCase: int=0.1 ,__lowerCAmelCase: List[Any]=1_026 ,__lowerCAmelCase: Optional[Any]=0.02 ,__lowerCAmelCase: Dict=1e-12 ,__lowerCAmelCase: Dict="absolute" ,__lowerCAmelCase: List[Any]=True ,__lowerCAmelCase: Optional[Any]=None ,__lowerCAmelCase: Union[str, Any]=False ,__lowerCAmelCase: str=False ,__lowerCAmelCase: List[Any]=None ,__lowerCAmelCase: Union[str, Any]=None ,**__lowerCAmelCase: int ,):
'''simple docstring'''
super().__init__(pad_token_id=__lowerCAmelCase ,mask_token_id=__lowerCAmelCase ,**__lowerCAmelCase )
_lowerCamelCase : List[Any] = vocab_size
_lowerCamelCase : Union[str, Any] = hidden_size
_lowerCamelCase : Optional[Any] = num_hidden_layers
_lowerCamelCase : str = num_attention_heads
_lowerCamelCase : int = intermediate_size
_lowerCamelCase : Tuple = hidden_dropout_prob
_lowerCamelCase : Any = attention_probs_dropout_prob
_lowerCamelCase : int = max_position_embeddings
_lowerCamelCase : int = initializer_range
_lowerCamelCase : Union[str, Any] = layer_norm_eps
_lowerCamelCase : Optional[int] = position_embedding_type
_lowerCamelCase : str = use_cache
_lowerCamelCase : Union[str, Any] = emb_layer_norm_before
_lowerCamelCase : Tuple = token_dropout
_lowerCamelCase : Dict = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("No esmfold_config supplied for folding model, using default values." )
_lowerCamelCase : Dict = EsmFoldConfig()
elif isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : List[Any] = EsmFoldConfig(**__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = esmfold_config
if vocab_list is None:
logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!" )
_lowerCamelCase : List[str] = get_default_vocab_list()
else:
_lowerCamelCase : Optional[Any] = vocab_list
else:
_lowerCamelCase : List[str] = None
_lowerCamelCase : Dict = None
if self.esmfold_config is not None and getattr(self.esmfold_config ,"use_esm_attn_map" ,__lowerCAmelCase ):
raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!" )
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : List[Any] = super().to_dict()
if isinstance(self.esmfold_config ,__lowerCAmelCase ):
_lowerCamelCase : Optional[int] = self.esmfold_config.to_dict()
return output
@dataclass
class A_ :
lowerCAmelCase__ = None
lowerCAmelCase__ = True
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = 0
lowerCAmelCase__ = True
lowerCAmelCase__ = False
lowerCAmelCase__ = 1_2_8
lowerCAmelCase__ = None
def _lowercase ( self: Dict ):
'''simple docstring'''
if self.trunk is None:
_lowerCamelCase : Optional[int] = TrunkConfig()
elif isinstance(self.trunk ,__lowerCAmelCase ):
_lowerCamelCase : Union[str, Any] = TrunkConfig(**self.trunk )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : Dict = asdict(self )
_lowerCamelCase : str = self.trunk.to_dict()
return output
@dataclass
class A_ :
lowerCAmelCase__ = 4_8
lowerCAmelCase__ = 1_0_2_4
lowerCAmelCase__ = 1_2_8
lowerCAmelCase__ = 3_2
lowerCAmelCase__ = 3_2
lowerCAmelCase__ = 3_2
lowerCAmelCase__ = 0
lowerCAmelCase__ = 0
lowerCAmelCase__ = False
lowerCAmelCase__ = 4
lowerCAmelCase__ = 1_2_8
lowerCAmelCase__ = None
def _lowercase ( self: Any ):
'''simple docstring'''
if self.structure_module is None:
_lowerCamelCase : Tuple = StructureModuleConfig()
elif isinstance(self.structure_module ,__lowerCAmelCase ):
_lowerCamelCase : str = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(F"""`max_recycles` should be positive, got {self.max_recycles}.""" )
if self.sequence_state_dim % self.sequence_head_width != 0:
raise ValueError(
"`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
F""" {self.sequence_state_dim} and {self.sequence_head_width}.""" )
if self.pairwise_state_dim % self.pairwise_head_width != 0:
raise ValueError(
"`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
F""" {self.pairwise_state_dim} and {self.pairwise_head_width}.""" )
_lowerCamelCase : Optional[Any] = self.sequence_state_dim // self.sequence_head_width
_lowerCamelCase : Optional[int] = self.pairwise_state_dim // self.pairwise_head_width
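# Arithmetic sketch using the dataclass defaults above: sequence_state_dim=1024 with
# sequence_head_width=32 yields 32 sequence heads, and pairwise_state_dim=128 with
# pairwise_head_width=32 yields 4 pairwise heads; 32 * 32 == 1024 satisfies the check below.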
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
F""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
F""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" )
if self.dropout >= 0.4:
raise ValueError(F"""`dropout` should not be greater than 0.4, got {self.dropout}.""" )
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : Dict = asdict(self )
_lowerCamelCase : Optional[int] = self.structure_module.to_dict()
return output
@dataclass
class A_ :
lowerCAmelCase__ = 3_8_4
lowerCAmelCase__ = 1_2_8
lowerCAmelCase__ = 1_6
lowerCAmelCase__ = 1_2_8
lowerCAmelCase__ = 1_2
lowerCAmelCase__ = 4
lowerCAmelCase__ = 8
lowerCAmelCase__ = 0.1
lowerCAmelCase__ = 8
lowerCAmelCase__ = 1
lowerCAmelCase__ = 2
lowerCAmelCase__ = 7
lowerCAmelCase__ = 1_0
lowerCAmelCase__ = 1E-8
lowerCAmelCase__ = 1E5
def _lowercase ( self: Any ):
'''simple docstring'''
return asdict(self )
def lowerCamelCase_( ) -> int:
'''simple docstring'''
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 46 | 1 |
"""simple docstring"""
import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
@pytest.mark.parametrize(
"compression_format, is_archive" , [
("7z", True),
("bz2", False),
("gzip", False),
("lz4", False),
("tar", True),
("xz", False),
("zip", True),
("zstd", False),
] , )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ) -> Tuple:
'''simple docstring'''
_lowerCamelCase : Tuple = {
"7z": (seven_zip_file, SevenZipExtractor),
"bz2": (bza_file, BzipaExtractor),
"gzip": (gz_file, GzipExtractor),
"lz4": (lza_file, LzaExtractor),
"tar": (tar_file, TarExtractor),
"xz": (xz_file, XzExtractor),
"zip": (zip_file, ZipExtractor),
"zstd": (zstd_file, ZstdExtractor),
}
_lowerCamelCase, _lowerCamelCase : Any = input_paths_and_base_extractors[compression_format]
if input_path is None:
_lowerCamelCase : Any = F"""for '{compression_format}' compression_format, """
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(_lowerCamelCase )
assert base_extractor.is_extractable(_lowerCamelCase )
_lowerCamelCase : Tuple = tmp_path / ("extracted" if is_archive else "extracted.txt")
base_extractor.extract(_lowerCamelCase , _lowerCamelCase )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
_lowerCamelCase : Union[str, Any] = file_path.read_text(encoding="utf-8" )
else:
_lowerCamelCase : Any = output_path.read_text(encoding="utf-8" )
_lowerCamelCase : Optional[int] = text_file.read_text(encoding="utf-8" )
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
"compression_format, is_archive" , [
("7z", True),
("bz2", False),
("gzip", False),
("lz4", False),
("tar", True),
("xz", False),
("zip", True),
("zstd", False),
] , )
def test_extractor(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    '''simple docstring'''
    input_paths = {
        "7z": seven_zip_file,
        "bz2": bz2_file,
        "gzip": gz_file,
        "lz4": lz4_file,
        "tar": tar_file,
        "xz": xz_file,
        "zip": zip_file,
        "zstd": zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    extractor_format = Extractor.infer_extractor_format(input_path)
    assert extractor_format is not None
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    Extractor.extract(input_path, output_path, extractor_format)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.fixture
def tar_file_with_dot_dot(tmp_path, text_file):
    '''simple docstring'''
    import tarfile

    directory = tmp_path / "data_dot_dot"
    directory.mkdir()
    path = directory / "tar_file_with_dot_dot.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.join("..", text_file.name))
    return path
@pytest.fixture
def tar_file_with_sym_link(tmp_path):
    '''simple docstring'''
    import tarfile

    directory = tmp_path / "data_sym_link"
    directory.mkdir()
    path = directory / "tar_file_with_sym_link.tar"
    os.symlink("..", directory / "subdir", target_is_directory=True)
    with tarfile.TarFile(path, "w") as f:
        f.add(str(directory / "subdir"), arcname="subdir")  # str required by os.readlink on Windows and Python < 3.8
    return path
@pytest.mark.parametrize(
"insecure_tar_file, error_log" , [("tar_file_with_dot_dot", "illegal path"), ("tar_file_with_sym_link", "Symlink")] , )
def test_tar_extract_insecure_files(
    insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog
):
    '''simple docstring'''
    insecure_tar_files = {
        "tar_file_with_dot_dot": tar_file_with_dot_dot,
        "tar_file_with_sym_link": tar_file_with_sym_link,
    }
    input_path = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / "extracted"
    TarExtractor.extract(input_path, output_path)
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg
def test_is_zipfile_false_positive(tmpdir):
    '''simple docstring'''
    not_a_zip_file = tmpdir / "not_a_zip_file"
    # From: https://github.com/python/cpython/pull/5053
    data = (
        b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
        b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
        b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
        b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
    )
    with not_a_zip_file.open("wb") as f:
        f.write(data)
    assert zipfile.is_zipfile(str(not_a_zip_file))  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file)  # but we're right
| 46 |
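# Illustrative sketch of the API exercised by the tests above (paths are
# placeholders): `infer_extractor_format` sniffs the file's magic number and
# `extract` dispatches to the matching extractor.
from datasets.utils.extract import Extractor

archive_path = "archive.tar.gz"  # placeholder
extractor_format = Extractor.infer_extractor_format(archive_path)
if extractor_format is not None:
    Extractor.extract(archive_path, "extracted_dir", extractor_format)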
"""simple docstring"""
import re
def lowerCamelCase_( dna: str ) -> str:
    '''simple docstring'''
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")
    return dna.translate(dna.maketrans("ATCG", "TAGC"))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 46 | 1 |
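# Quick sanity check for the complement function above (A<->T, C<->G), using
# the name it carries in this snippet:
assert lowerCamelCase_("ATCG") == "TAGC"
assert lowerCamelCase_("GCTA") == "CGAT"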
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation
    inputs = ["image", "text"]
    outputs = ["image"]
    def __init__(self, *args, **kwargs):
        '''simple docstring'''
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", label: str):
        '''simple docstring'''
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        '''simple docstring'''
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        '''simple docstring'''
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
| 46 |
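# Illustrative sketch (file names are placeholders): PipelineTool subclasses
# are callable, and __call__ chains encode -> forward -> decode, so the tool
# above can be driven directly once its checkpoint is available.
from PIL import Image

tool = ImageSegmentationTool()
mask = tool(image=Image.open("cat.png"), label="cat")
mask.save("cat_mask.png")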
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : str = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    '''simple docstring'''
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    '''simple docstring'''
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    '''simple docstring'''
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    '''simple docstring'''
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    '''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    '''simple docstring'''
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''dino_vitb16''',
type=str,
help='''Name of the model trained with DINO you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--base_model''',
action='''store_true''',
help='''Whether to only convert the base model (no projection head weights).''',
)
parser.set_defaults(base_model=True)
    args = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 46 | 1 |
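# Illustrative sketch (folder name is a placeholder): after the script above
# runs, the dump folder is a regular Hugging Face checkpoint.
from transformers import ViTImageProcessor, ViTModel

model = ViTModel.from_pretrained("./dino_vitb16_converted")
image_processor = ViTImageProcessor.from_pretrained("./dino_vitb16_converted")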
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEImg2ImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False
    @property
    def text_embedder_hidden_size(self):
        '''simple docstring'''
        return 32

    @property
    def time_input_dim(self):
        '''simple docstring'''
        return 32

    @property
    def time_embed_dim(self):
        '''simple docstring'''
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        '''simple docstring'''
        return 8

    @property
    def dummy_image_encoder(self):
        '''simple docstring'''
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=64,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=1,
        )
        model = CLIPVisionModel(config)
        return model

    @property
    def dummy_image_processor(self):
        '''simple docstring'''
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor
    @property
    def dummy_prior(self):
        '''simple docstring'''
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "embedding_proj_norm_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        '''simple docstring'''
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        '''simple docstring'''
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "image_processor": image_processor,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        '''simple docstring'''
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs
    def test_shap_e_img2img(self):
        '''simple docstring'''
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        '''simple docstring'''
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        '''simple docstring'''
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        '''simple docstring'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img(self):
        '''simple docstring'''
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy"
        )
        pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            input_image,
            generator=generator,
            guidance_scale=3.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
| 46 |
"""simple docstring"""
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    '''simple docstring'''
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    '''simple docstring'''
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


class ClassificationFunction(ExplicitEnum):
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        return_all_scores (`bool`, *optional*, defaults to `False`):
            Whether to return all prediction scores or just the one of the predicted class.
        function_to_apply (`str`, *optional*, defaults to `"default"`):
            The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:

            - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model
              has several labels, will apply the softmax function on the output.
            - `"sigmoid"`: Applies the sigmoid function on the output.
            - `"softmax"`: Applies the softmax function on the output.
            - `"none"`: Does not apply any function on the output.
    """,
)
class TextClassificationPipeline(Pipeline):
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE

    def __init__(self, **kwargs):
        '''simple docstring'''
        super().__init__(**kwargs)
        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        '''simple docstring'''
        preprocess_params = tokenizer_kwargs
        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores
        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
                UserWarning,
            )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1
        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]
        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params

    def __call__(self, *args, **kwargs):
        '''simple docstring'''
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result

    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        '''simple docstring'''
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs
            )
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.'
            )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)

    def _forward(self, model_inputs):
        '''simple docstring'''
        return self.model(**model_inputs)

    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        '''simple docstring'''
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE
        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()
        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}")
        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}
        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
| 46 | 1 |
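# Illustrative sketch of the user-facing behaviour implemented above: a single
# string returns a one-element list, and `top_k=None` returns every label.
from transformers import pipeline

classifier = pipeline("text-classification", model="distilbert-base-uncased-finetuned-sst-2-english")
print(classifier("This movie was great!"))              # [{'label': 'POSITIVE', 'score': ...}]
print(classifier("This movie was great!", top_k=None))  # scores for all labels, sorted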
"""simple docstring"""
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : Tuple = logging.get_logger(__name__)
_lowerCAmelCase : Union[str, Any] = {
'''huggingface/autoformer-tourism-monthly''': '''https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json''',
}
class AutoformerConfig(PretrainedConfig):
    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: bool = True,
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        activation_function: str = "gelu",
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache: bool = True,
        is_encoder_decoder=True,
        label_length: int = 10,
        moving_average: int = 25,
        autocorrelation_factor: int = 3,
        **kwargs,
    ):
        '''simple docstring'''
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
@property
    def _number_of_features(self) -> int:
'''simple docstring'''
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 46 |
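# Illustrative sketch: defaults mirror the signature above, and `attribute_map`
# lets generic code read `hidden_size` and transparently get `d_model`.
from transformers import AutoformerConfig

config = AutoformerConfig(prediction_length=24)
assert config.context_length == 24            # falls back to prediction_length
assert config.hidden_size == config.d_model   # resolved through attribute_map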
"""simple docstring"""
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = """\
Text data.
Second line of data."""

FILE_PATH = "file"
@pytest.fixture(scope="session" )
def zstd_path(tmp_path_factory):
    '''simple docstring'''
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture
def tmpfs_file(tmpfs):
    '''simple docstring'''
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH
@pytest.mark.parametrize("compression_format" , ["gzip", "xz", "zstd"] )
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    '''simple docstring'''
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted" , [True, False] )
@pytest.mark.parametrize("default_cache_dir" , [True, False] )
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    '''simple docstring'''
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected
def test_cached_path_local(text_file):
    '''simple docstring'''
    # absolute path
    text_file_abs = str(Path(text_file).resolve())
    assert cached_path(text_file_abs) == text_file_abs
    # relative path
    text_file_rel = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file_rel) == text_file_rel


def test_cached_path_missing_local(tmp_path):
    '''simple docstring'''
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)


def test_get_from_cache_fsspec(tmpfs_file):
    '''simple docstring'''
    output_file = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_file) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    '''simple docstring'''
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    '''simple docstring'''
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    '''simple docstring'''
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    '''simple docstring'''
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
| 46 | 1 |
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    '''simple docstring'''
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
if __name__ == "__main__":
fire.Fire(convert)
| 46 |
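# Illustrative usage of the Fire CLI defined above (file names are placeholders):
#   python convert_to_fp16.py pytorch_model.bin --save_path pytorch_model_fp16.bin
# or directly from Python:
#   convert("pytorch_model.bin", save_path="pytorch_model_fp16.bin")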
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase = "cpu" , _lowerCamelCase = None ) -> None:
'''simple docstring'''
_lowerCamelCase : Any = torch.load(_lowerCamelCase , map_location=_lowerCamelCase )
for k, v in tqdm(state_dict.items() ):
if not isinstance(_lowerCamelCase , torch.Tensor ):
raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin" )
_lowerCamelCase : List[str] = v.half()
if save_path is None: # overwrite src_path
_lowerCamelCase : Union[str, Any] = src_path
torch.save(_lowerCamelCase , _lowerCamelCase )
if __name__ == "__main__":
fire.Fire(convert)
| 46 | 1 |
"""simple docstring"""
from __future__ import annotations
from math import gcd
def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> int | None:
'''simple docstring'''
if num < 2:
raise ValueError("The input value cannot be less than 2" )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed
        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)
            # At some point both the tortoise and the hare will enter a cycle whose
            # length ``p`` is a divisor of ``num``. Once in that cycle, at some point
            # the tortoise and hare will end up on the same value modulo ``p``.
            # We can detect when this happens because the position difference between
            # the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's the "optimized" variant does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser()
parser.add_argument(
'''num''',
type=int,
help='''The value to find a divisor of''',
)
parser.add_argument(
'''--attempts''',
type=int,
default=3,
help='''The number of attempts before giving up''',
)
    args = parser.parse_args()
    divisor = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(f'''{args.num} is probably prime''')
else:
        quotient = args.num // divisor
print(f'''{args.num} = {divisor} * {quotient}''')
| 46 |
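# Worked example: 8051 = 83 * 97, so a successful run recovers one of the two
# prime factors (which one depends on the seed and step used).
factor = pollard_rho(8051)
assert factor in (83, 97)
assert 8051 % factor == 0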
"""simple docstring"""
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
SAMPLE_ROBERTA_CONFIG = get_tests_dir('''fixtures/dummy-config.json''')
class AutoConfigTest(unittest.TestCase):
    def setUp(self):
        '''simple docstring'''
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_module_spec(self):
        '''simple docstring'''
        self.assertIsNotNone(transformers.models.auto.__spec__)
        self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto"))

    def test_config_from_model_shortcut(self):
        '''simple docstring'''
        config = AutoConfig.from_pretrained("bert-base-uncased")
        self.assertIsInstance(config, BertConfig)

    def test_config_model_type_from_local_file(self):
        '''simple docstring'''
        config = AutoConfig.from_pretrained(SAMPLE_ROBERTA_CONFIG)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_model_type_from_model_identifier(self):
        '''simple docstring'''
        config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_for_model_str(self):
        '''simple docstring'''
        config = AutoConfig.for_model("roberta")
        self.assertIsInstance(config, RobertaConfig)

    def test_pattern_matching_fallback(self):
        '''simple docstring'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            # This model name contains bert and roberta, but roberta ends up being picked.
            folder = os.path.join(tmp_dir, "fake-roberta")
            os.makedirs(folder, exist_ok=True)
            with open(os.path.join(folder, "config.json"), "w") as f:
                f.write(json.dumps({}))
            config = AutoConfig.from_pretrained(folder)
            self.assertEqual(type(config), RobertaConfig)
    def test_new_config_registration(self):
        '''simple docstring'''
        try:
            AutoConfig.register("custom", CustomConfig)
            # Wrong model type will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("model", CustomConfig)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("bert", BertConfig)
            # Now that the config is registered, it can be used as any other config with the auto-API
            config = CustomConfig()
            with tempfile.TemporaryDirectory() as tmp_dir:
                config.save_pretrained(tmp_dir)
                new_config = AutoConfig.from_pretrained(tmp_dir)
                self.assertIsInstance(new_config, CustomConfig)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]

    def test_repo_not_found(self):
        '''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            AutoConfig.from_pretrained("bert-base")

    def test_revision_not_found(self):
        '''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_configuration_not_found(self):
        '''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.",
        ):
            AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo")

    def test_from_pretrained_dynamic_config(self):
        '''simple docstring'''
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)
        config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
        self.assertEqual(config.__class__.__name__, "NewModelConfig")
        # Test config can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            reloaded_config = AutoConfig.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_config.__class__.__name__, "NewModelConfig")
    def test_from_pretrained_dynamic_config_conflict(self):
        '''simple docstring'''
        class NewModelConfigLocal(BertConfig):
            model_type = "new-model"

        try:
            AutoConfig.register("new-model", NewModelConfigLocal)
            # If remote code is not set, the default is to use local
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote code is disabled, we load the local one.
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote is enabled, we load from the Hub
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
            self.assertEqual(config.__class__.__name__, "NewModelConfig")
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
| 46 | 1 |
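# Illustrative sketch of the round-trip the tests above rely on: any config
# saved with `save_pretrained` can be re-loaded through the auto-API
# (the directory name is a placeholder).
from transformers import AutoConfig

config = AutoConfig.from_pretrained("bert-base-uncased")
config.save_pretrained("./saved_config")  # writes config.json
reloaded = AutoConfig.from_pretrained("./saved_config")
assert reloaded.model_type == "bert"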
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        '''simple docstring'''
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        '''simple docstring'''
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = YolosImageProcessor if is_vision_available() else None
    def setUp(self):
        '''simple docstring'''
        self.image_processor_tester = YolosImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        '''simple docstring'''
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        '''simple docstring'''
        pass
    def test_call_pil(self):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_equivalence_padding(self):
        '''simple docstring'''
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors="pt")
        encoded_images = image_processing_2(image_inputs, return_tensors="pt")
        self.assertTrue(
            torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4)
        )
@slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        '''simple docstring'''
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39_769, "annotations": target}
        # encode them
        from transformers import YolosImageProcessor  # local import; not in this module's header

        image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1_066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.27_96, 0.31_38, 0.34_81])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_shape)
        expected_slice = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39_769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_iscrowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_iscrowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1_066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
@slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        '''simple docstring'''
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39_769, "segments_info": target}
        import pathlib  # local import; not in this module's header

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")
        # encode them
        from transformers import YolosImageProcessor  # local import; not in this module's header

        image_processing = YolosImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1_066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.27_96, 0.31_38, 0.34_81])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_shape)
        expected_slice = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39_769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_iscrowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_iscrowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822_873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1_066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 46 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_gpt_sw3'''] = ['''GPTSw3Tokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 46 | 1 |
"""simple docstring"""
def solution(n: int = 100) -> int:
    '''simple docstring'''
    collect_powers = set()
    current_pow = 0
    n = n + 1  # maximum limit
    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)
if __name__ == "__main__":
print('''Number of terms ''', solution(int(str(input()).strip())))
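# Quick check (added for illustration): the Project Euler 29 statement says
# that for 2 <= a <= 5 and 2 <= b <= 5 there are exactly 15 distinct terms,
# so solution(5) should return 15.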
| 46 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False) -> int:
    '''simple docstring'''
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""transformer.blocks.{i}.norm1.weight""", F"""vilt.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm1.bias""", F"""vilt.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.weight""", F"""vilt.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.bias""", F"""vilt.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.weight""", F"""vilt.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.bias""", F"""vilt.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.mlp.fc1.weight""", F"""vilt.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc1.bias""", F"""vilt.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.weight""", F"""vilt.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.bias""", F"""vilt.encoder.layer.{i}.output.dense.bias""") )
# embeddings
rename_keys.extend(
[
# text embeddings
("text_embeddings.word_embeddings.weight", "vilt.embeddings.text_embeddings.word_embeddings.weight"),
(
"text_embeddings.position_embeddings.weight",
"vilt.embeddings.text_embeddings.position_embeddings.weight",
),
("text_embeddings.position_ids", "vilt.embeddings.text_embeddings.position_ids"),
(
"text_embeddings.token_type_embeddings.weight",
"vilt.embeddings.text_embeddings.token_type_embeddings.weight",
),
("text_embeddings.LayerNorm.weight", "vilt.embeddings.text_embeddings.LayerNorm.weight"),
("text_embeddings.LayerNorm.bias", "vilt.embeddings.text_embeddings.LayerNorm.bias"),
# patch embeddings
("transformer.cls_token", "vilt.embeddings.cls_token"),
("transformer.patch_embed.proj.weight", "vilt.embeddings.patch_embeddings.projection.weight"),
("transformer.patch_embed.proj.bias", "vilt.embeddings.patch_embeddings.projection.bias"),
("transformer.pos_embed", "vilt.embeddings.position_embeddings"),
# token type embeddings
("token_type_embeddings.weight", "vilt.embeddings.token_type_embeddings.weight"),
] )
# final layernorm + pooler
rename_keys.extend(
[
("transformer.norm.weight", "vilt.layernorm.weight"),
("transformer.norm.bias", "vilt.layernorm.bias"),
("pooler.dense.weight", "vilt.pooler.dense.weight"),
("pooler.dense.bias", "vilt.pooler.dense.bias"),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
("vqa_classifier.0.weight", "classifier.0.weight"),
("vqa_classifier.0.bias", "classifier.0.bias"),
("vqa_classifier.1.weight", "classifier.1.weight"),
("vqa_classifier.1.bias", "classifier.1.bias"),
("vqa_classifier.3.weight", "classifier.3.weight"),
("vqa_classifier.3.bias", "classifier.3.bias"),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
("nlvr2_classifier.0.weight", "classifier.0.weight"),
("nlvr2_classifier.0.bias", "classifier.0.bias"),
("nlvr2_classifier.1.weight", "classifier.1.weight"),
("nlvr2_classifier.1.bias", "classifier.1.bias"),
("nlvr2_classifier.3.weight", "classifier.3.weight"),
("nlvr2_classifier.3.bias", "classifier.3.bias"),
] )
else:
pass
return rename_keys
def read_in_q_k_v(state_dict, config) -> Optional[Any]:
    '''simple docstring'''
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.weight""")
        in_proj_bias = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.bias""")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict) -> int:
    '''simple docstring'''
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new) -> Any:
    '''simple docstring'''
    val = dct.pop(old)
    dct[new] = val
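# Illustrative use of rename_key, with a key pair taken from the mapping above:
#   rename_key(state_dict, "transformer.norm.weight", "vilt.layernorm.weight")
# pops the tensor stored under the old key and re-inserts it under the new one.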
@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path) -> Optional[int]:
    '''simple docstring'''
    # define configuration and initialize HuggingFace model
    config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = "huggingface/label-files"
        filename = "vqa2-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config)
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: "False", 1: "True"}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config)
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config)
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config)
    else:
        raise ValueError("Unknown model type")
    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)
    if mlm_model or irtr_model:
        ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
        for k in ignore_keys:
            state_dict.pop(k, None)
    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict)
    # Define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    processor = ViltProcessor(image_processor, tokenizer)
    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_1 = processor(image1, text, return_tensors="pt")
        encoding_2 = processor(image2, text, return_tensors="pt")
        outputs = model(
            input_ids=encoding_1.input_ids, pixel_values=encoding_1.pixel_values, pixel_values_2=encoding_2.pixel_values, )
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image, text, return_tensors="pt")
        outputs = model(**encoding)
    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-1_2.5_0_6_1, -1_2.5_1_2_3, -1_2.5_1_7_4])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)
        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-1_5.9_4_9_5, -1_8.1_4_7_2, -1_0.3_0_4_1])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape
        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8_7_2_1, 2.1_2_9_1])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(F"""Saving model and processor to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
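# Example invocation (the checkpoint URL is the script's default; the script
# file name and output folder are illustrative):
#   python convert_vilt_checkpoint.py \
#       --checkpoint_url https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt \
#       --pytorch_dump_folder_path ./vilt-converted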
| 46 | 1 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2ConformerConfig,
    Wav2Vec2ConformerForCTC,
    Wav2Vec2ConformerForPreTraining,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
'''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
'''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
'''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
'''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
'''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
'''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
'''self_attn.rotary_emb''': '''encoder.embed_positions''',
'''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
'''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
'''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
'''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
'''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
'''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
'''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
'''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
'''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
'''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
'''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
'''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type) -> int:
    '''simple docstring'''
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
            F""" {value.shape} for {full_name}""")
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value
    logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""")
def recursively_load_weights(fairseq_model, hf_model, is_headless) -> Union[str, Any]:
    '''simple docstring'''
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(F"""Unused weights: {unused_weights}""")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm) -> Optional[int]:
    '''simple docstring'''
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F"""{full_name} has size {value.shape}, but"""
                    F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""")
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F"""{full_name} has size {value.shape}, but"""
                    F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""")
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F"""{full_name} has size {value.shape}, but"""
                    F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""")
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F"""{full_name} has size {value.shape}, but"""
                    F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""")
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
) -> List[Any]:
    '''simple docstring'''
    if config_path is not None:
        config = Wav2Vec2ConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = Wav2Vec2ConformerConfig()
    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=False, )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask, )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wav2vec = Wav2Vec2ConformerForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ConformerForPreTraining(config)
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])})
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)
    model = model[0].eval()
    recursively_load_weights(model, hf_wav2vec, not is_finetuned)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
    args = parser.parse_args()
    convert_wav2vec2_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
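# Example invocation (the script file name and all paths are illustrative; a
# local fairseq checkpoint is required):
#   python convert_wav2vec2_conformer.py \
#       --checkpoint_path ./conformer_large.pt \
#       --pytorch_dump_folder_path ./wav2vec2-conformer-converted \
#       --not_finetuned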
| 46 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    '''simple docstring'''
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)
def check(binary: list[str]) -> list[str]:
    '''simple docstring'''
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))
def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    '''simple docstring'''
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp
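# Example (added for illustration; integer minterms yield plain bit strings):
#   >>> decimal_to_binary(3, [1, 5])
#   ['001', '101']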
def is_for_table(string1: str, string2: str, count: int) -> bool:
    '''simple docstring'''
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count
def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    '''simple docstring'''
    temp = []
    select = [0] * len(chart)
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0
def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    '''simple docstring'''
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart
def main() -> None:
    '''simple docstring'''
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n").split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)
    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)
    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
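# Illustrative run (outputs depend on the interactive input): entering 3
# variables and minterms "0 1 2 5" converts them to bit strings with
# decimal_to_binary, reduces them to prime implicants with check, builds the
# coverage chart with prime_implicant_chart, and finally picks the essential
# prime implicants with selection.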
| 46 | 1 |
"""simple docstring"""
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def check_correctness(check_program, timeout, task_id, completion_id) -> Optional[Any]:
    '''simple docstring'''
    manager = multiprocessing.Manager()
    result = manager.list()
    p = multiprocessing.Process(target=unsafe_execute, args=(check_program, result, timeout))
    p.start()
    p.join(timeout=timeout + 1)
    if p.is_alive():
        p.kill()
    if not result:
        result.append("timed out")
    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }
def unsafe_execute(check_program, result, timeout) -> int:
    '''simple docstring'''
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir
        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()
        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
            result.append("passed")
        except TimeoutException:
            result.append("timed out")
        except BaseException as e:
            result.append(F"""failed: {e}""")
        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir
@contextlib.contextmanager
def time_limit(seconds) -> Dict:
    '''simple docstring'''
    def signal_handler(signum, frame):
        raise TimeoutException("Timed out!")
    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, signal_handler)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)
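# Illustrative usage (Unix-only, since it relies on SIGALRM; run_check is a
# hypothetical callable):
#   with time_limit(2.0):
#       run_check()  # raises TimeoutException after ~2 seconds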
@contextlib.contextmanager
def swallow_io() -> Optional[Any]:
    '''simple docstring'''
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
                yield
@contextlib.contextmanager
def create_tempdir() -> Union[str, Any]:
    '''simple docstring'''
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
            yield dirname
class TimeoutException(Exception):
    pass
class WriteOnlyStringIO(io.StringIO):
    """StringIO that throws an exception when it is read from."""

    def read(self, *args, **kwargs):
        raise OSError

    def readline(self, *args, **kwargs):
        raise OSError

    def readlines(self, *args, **kwargs):
        raise OSError

    def readable(self, *args, **kwargs):
        return False
class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    _stream = "stdin"
@contextlib.contextmanager
def chdir(root) -> Any:
    '''simple docstring'''
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd)
def reliability_guard(maximum_memory_bytes=None) -> str:
    '''Disables destructive functions so the executed program cannot interfere
    with the test host. This is a mitigation, not a security sandbox.'''
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))
    faulthandler.disable()
    import builtins

    builtins.exit = None
    builtins.quit = None
    import os

    os.environ["OMP_NUM_THREADS"] = "1"
    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.fchdir = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None
    import shutil

    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None
    import subprocess

    subprocess.Popen = None  # type: ignore
    __builtins__["help"] = None
    import sys

    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
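# Illustrative call (the program string and ids are made-up examples):
#   res = check_correctness("assert 1 + 1 == 2", timeout=3.0, task_id="demo/0", completion_id=0)
#   res["passed"]  # -> True, since the program runs without raising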
| 46 |
"""simple docstring"""
from __future__ import annotations
from random import random
class Node:
    def __init__(self, value: int | None = None):
        '''simple docstring'''
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None
def __repr__( self: Tuple ):
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return F"""'{self.value}: {self.prior:.5}'"""
else:
return pformat(
{F"""{self.value}: {self.prior:.5}""": (self.left, self.right)} ,indent=1 )
    def __str__(self):
        '''simple docstring'''
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right
def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    '''simple docstring'''
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right
def merge(left: Node | None, right: Node | None) -> Node | None:
    '''simple docstring'''
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right
def insert(root: Node | None, value: int) -> Node | None:
    '''simple docstring'''
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)
def erase(root: Node | None, value: int) -> Node | None:
    '''simple docstring'''
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)
def inorder(root: Node | None) -> None:
'''simple docstring'''
if not root: # None
return
else:
inorder(root.left )
print(root.value , end="," )
inorder(root.right )
def interact_treap(root: Node | None, args: str) -> Node | None:
    '''simple docstring'''
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root
def main() -> None:
    '''simple docstring'''
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. ")
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print("goodbye!")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
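# Example session (illustrative): feeding "+1 +3 +5 -3" to interact_treap
# inserts the keys 1, 3 and 5 and then erases every node holding 3, so the
# printed tree contains only 1 and 5 (its shape depends on random priorities).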
| 46 | 1 |
"""simple docstring"""
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_cpmant''': ['''CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CpmAntConfig'''],
'''tokenization_cpmant''': ['''CpmAntTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_cpmant'''] = [
'''CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CpmAntForCausalLM''',
'''CpmAntModel''',
'''CpmAntPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
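# Illustrative usage once the lazy module is wired up (the checkpoint name is
# the published CPM-Ant checkpoint, shown here only as an example):
#   from transformers import CpmAntTokenizer, CpmAntForCausalLM
#   tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
#   model = CpmAntForCausalLM.from_pretrained("openbmb/cpm-ant-10b")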
| 46 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speecht5 import SpeechT5Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece_bpe_char.model''')
@require_sentencepiece
@require_tokenizers
class SpeechT5TokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechT5Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        '''simple docstring'''
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB)
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        '''simple docstring'''
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        '''simple docstring'''
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_convert_token_and_id(self):
        '''simple docstring'''
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        '''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-4], "œ")
        self.assertEqual(vocab_keys[-2], "<mask>")
        self.assertEqual(vocab_keys[-1], "<ctc_blank>")
        self.assertEqual(len(vocab_keys), 81)
    def test_vocab_size(self):
        '''simple docstring'''
        self.assertEqual(self.get_tokenizer().vocab_size, 79)
    def test_add_tokens_tokenizer(self):
        '''simple docstring'''
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}"""):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)
                self.assertNotEqual(vocab_size, 0)
                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)
                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)
                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))
                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)
                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)
                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))
                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False)
                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)
    def test_pickle_subword_regularization_tokenizer(self):
        '''simple docstring'''
        pass
    def test_subword_regularization_tokenizer(self):
        '''simple docstring'''
        pass
    def test_full_tokenizer(self):
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        tokens = tokenizer.tokenize("This is a test")
        # fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, "T", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "a", SPIECE_UNDERLINE, "t", "e", "s", "t"])
        # fmt: on
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6], )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "92000", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."])
        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "<unk>", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."])
@slow
    def test_tokenizer_integration(self):
        '''simple docstring'''
        sequences = [
"Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
"general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
"Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained "
"models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.",
"BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
"conditioning on both left and right context in all layers.",
"The quick brown fox jumps over the lazy dog.",
]
# fmt: off
        expected_encoding = {
"input_ids": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCAmelCase ,model_name="microsoft/speecht5_asr" ,revision="c5ef64c71905caeccde0e4462ef3f9077224c524" ,sequences=__lowerCAmelCase ,)
| 46 | 1 |
"""simple docstring"""
from __future__ import annotations
from typing import Generic, TypeVar
_lowerCAmelCase : Union[str, Any] = TypeVar('''T''')
class A_ ( Generic[T] ):
def __init__( self: Optional[Any] ,__lowerCAmelCase: T ):
'''simple docstring'''
_lowerCamelCase : Tuple = data
_lowerCamelCase : int = self
_lowerCamelCase : Optional[int] = 0
class A_ ( Generic[T] ):
def __init__( self: Tuple ):
'''simple docstring'''
_lowerCamelCase : dict[T, DisjointSetTreeNode[T]] = {}
def _lowercase ( self: Any ,__lowerCAmelCase: T ):
'''simple docstring'''
_lowerCamelCase : List[Any] = DisjointSetTreeNode(__lowerCAmelCase )
def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: T ):
'''simple docstring'''
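        # find with path compression: every node visited on the way to the root is
        # re-parented directly onto the root, flattening the tree for later lookups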
_lowerCamelCase : Optional[int] = self.map[data]
if elem_ref != elem_ref.parent:
_lowerCamelCase : Union[str, Any] = self.find_set(elem_ref.parent.data )
return elem_ref.parent
def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: DisjointSetTreeNode[T] ,__lowerCAmelCase: DisjointSetTreeNode[T] ):
'''simple docstring'''
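        # union by rank: the root of the shorter tree is hung under the taller one,
        # and a rank only grows when two roots of equal rank are merged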
if nodea.rank > nodea.rank:
_lowerCamelCase : Tuple = nodea
else:
_lowerCamelCase : Optional[int] = nodea
if nodea.rank == nodea.rank:
nodea.rank += 1
def _lowercase ( self: Any ,__lowerCAmelCase: T ,__lowerCAmelCase: T ):
'''simple docstring'''
self.link(self.find_set(__lowerCAmelCase ) ,self.find_set(__lowerCAmelCase ) )
class A_ ( Generic[T] ):
def __init__( self: Tuple ):
'''simple docstring'''
_lowerCamelCase : dict[T, dict[T, int]] = {}
def _lowercase ( self: Tuple ,__lowerCAmelCase: T ):
'''simple docstring'''
if node not in self.connections:
_lowerCamelCase : List[str] = {}
def _lowercase ( self: Optional[int] ,__lowerCAmelCase: T ,__lowerCAmelCase: T ,__lowerCAmelCase: int ):
'''simple docstring'''
self.add_node(__lowerCAmelCase )
self.add_node(__lowerCAmelCase )
_lowerCamelCase : Dict = weight
_lowerCamelCase : Dict = weight
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : Any = []
_lowerCamelCase : Optional[int] = set()
for start in self.connections:
for end in self.connections[start]:
if (start, end) not in seen:
seen.add((end, start) )
edges.append((start, end, self.connections[start][end]) )
        edges.sort(key=lambda __lowerCAmelCase : __lowerCAmelCase[2] )
# creating the disjoint set
_lowerCamelCase : str = DisjointSetTree[T]()
for node in self.connections:
disjoint_set.make_set(__lowerCAmelCase )
# MST generation
_lowerCamelCase : Tuple = 0
_lowerCamelCase : List[str] = 0
_lowerCamelCase : int = GraphUndirectedWeighted[T]()
while num_edges < len(self.connections ) - 1:
            _lowerCamelCase, _lowerCamelCase, _lowerCamelCase = edges[index]
index += 1
_lowerCamelCase : Union[str, Any] = disjoint_set.find_set(__lowerCAmelCase )
_lowerCamelCase : Tuple = disjoint_set.find_set(__lowerCAmelCase )
if parent_u != parent_v:
num_edges += 1
graph.add_edge(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
disjoint_set.union(__lowerCAmelCase ,__lowerCAmelCase )
return graph
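
# Illustrative, self-contained sketch of the same Kruskal + union-by-rank idea.
# It is not part of the original file and deliberately avoids the obfuscated
# class/method names above (every method there is renamed to `_lowercase`),
# so it runs on its own.
def kruskal_mst(edges: list[tuple[int, int, int]]) -> list[tuple[int, int, int]]:
    parent: dict[int, int] = {}
    rank: dict[int, int] = {}

    def find(x: int) -> int:
        parent.setdefault(x, x)
        rank.setdefault(x, 0)
        if parent[x] != x:
            parent[x] = find(parent[x])  # path compression
        return parent[x]

    mst = []
    for u, v, w in sorted(edges, key=lambda e: e[2]):  # cheapest edges first
        root_u, root_v = find(u), find(v)
        if root_u != root_v:  # the edge connects two components, so no cycle
            if rank[root_u] < rank[root_v]:
                root_u, root_v = root_v, root_u
            parent[root_v] = root_u  # union by rank
            if rank[root_u] == rank[root_v]:
                rank[root_u] += 1
            mst.append((u, v, w))
    return mst

assert kruskal_mst([(1, 2, 1), (2, 3, 2), (1, 3, 3)]) == [(1, 2, 1), (2, 3, 2)]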
| 46 |
"""simple docstring"""
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
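
# Illustrative sketch, not part of the original file (it assumes this module is
# `diffusers.schedulers.__init__`). Because of the guards above, the import below
# succeeds even when torchsde is missing -- it then resolves to a dummy object
# that raises a descriptive error only when the class is actually instantiated.
from diffusers.schedulers import DPMSolverSDEScheduler

scheduler = DPMSolverSDEScheduler()  # errors here, not at import time, if torchsde is absent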
| 46 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowerCAmelCase : Optional[int] = {
'''configuration_canine''': ['''CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CanineConfig'''],
'''tokenization_canine''': ['''CanineTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Tuple = [
'''CANINE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CanineForMultipleChoice''',
'''CanineForQuestionAnswering''',
'''CanineForSequenceClassification''',
'''CanineForTokenClassification''',
'''CanineLayer''',
'''CanineModel''',
'''CaninePreTrainedModel''',
'''load_tf_weights_in_canine''',
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
_lowerCAmelCase : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
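
# Illustrative sketch, not part of the original file (it assumes this module
# ships as `transformers.models.canine`). _LazyModule keeps the package import
# cheap: the first attribute access is what actually loads the submodule.
import importlib

canine = importlib.import_module("transformers.models.canine")  # fast, nothing heavy loaded yet
tokenizer_cls = canine.CanineTokenizer  # triggers the real import of tokenization_canine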
| 46 |
"""simple docstring"""
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class A_ ( _a ):
lowerCAmelCase__ = (DDIMParallelScheduler,)
lowerCAmelCase__ = (('eta', 0.0), ('num_inference_steps', 5_0))
def _lowercase ( self: List[str] ,**__lowerCAmelCase: Tuple ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = {
"num_train_timesteps": 1_000,
"beta_start": 0.00_01,
"beta_end": 0.02,
"beta_schedule": "linear",
"clip_sample": True,
}
config.update(**__lowerCAmelCase )
return config
def _lowercase ( self: int ,**__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : List[Any] = self.scheduler_classes[0]
_lowerCamelCase : str = self.get_scheduler_config(**__lowerCAmelCase )
_lowerCamelCase : Any = scheduler_class(**__lowerCAmelCase )
        _lowerCamelCase, _lowerCamelCase = 10, 0.0
_lowerCamelCase : List[Any] = self.dummy_model()
_lowerCamelCase : Optional[Any] = self.dummy_sample_deter
scheduler.set_timesteps(__lowerCAmelCase )
for t in scheduler.timesteps:
_lowerCamelCase : Optional[Any] = model(__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : int = scheduler.step(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ).prev_sample
return sample
def _lowercase ( self: List[str] ):
'''simple docstring'''
for timesteps in [100, 500, 1_000]:
self.check_over_configs(num_train_timesteps=__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = self.scheduler_classes[0]
_lowerCamelCase : Dict = self.get_scheduler_config(steps_offset=1 )
_lowerCamelCase : Union[str, Any] = scheduler_class(**__lowerCAmelCase )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps ,torch.LongTensor([801, 601, 401, 201, 1] ) )
def _lowercase ( self: Any ):
'''simple docstring'''
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] ,[0.0_02, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=__lowerCAmelCase ,beta_end=__lowerCAmelCase )
def _lowercase ( self: List[str] ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__lowerCAmelCase )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__lowerCAmelCase )
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=__lowerCAmelCase )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=__lowerCAmelCase )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
self.check_over_configs(thresholding=__lowerCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=__lowerCAmelCase ,prediction_type=__lowerCAmelCase ,sample_max_value=__lowerCAmelCase ,)
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
for t in [1, 10, 49]:
self.check_over_forward(time_step=__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 10, 50] ,[10, 50, 500] ):
self.check_over_forward(time_step=__lowerCAmelCase ,num_inference_steps=__lowerCAmelCase )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
for t, eta in zip([1, 10, 49] ,[0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=__lowerCAmelCase ,eta=__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = self.scheduler_classes[0]
_lowerCamelCase : str = self.get_scheduler_config()
_lowerCamelCase : List[str] = scheduler_class(**__lowerCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ,0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(420 ,400 ) - 0.1_47_71 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(980 ,960 ) - 0.3_24_60 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0 ,0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ,486 ) - 0.0_09_79 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ,998 ) - 0.02 ) ) < 1e-5
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = self.scheduler_classes[0]
_lowerCamelCase : Union[str, Any] = self.get_scheduler_config()
_lowerCamelCase : str = scheduler_class(**__lowerCAmelCase )
        _lowerCamelCase, _lowerCamelCase = 10, 0.0
scheduler.set_timesteps(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = self.dummy_model()
_lowerCamelCase : Optional[int] = self.dummy_sample_deter
_lowerCamelCase : List[str] = self.dummy_sample_deter + 0.1
_lowerCamelCase : Dict = self.dummy_sample_deter - 0.1
_lowerCamelCase : Union[str, Any] = samplea.shape[0]
_lowerCamelCase : List[Any] = torch.stack([samplea, samplea, samplea] ,dim=0 )
_lowerCamelCase : Dict = torch.arange(__lowerCAmelCase )[0:3, None].repeat(1 ,__lowerCAmelCase )
_lowerCamelCase : str = model(samples.flatten(0 ,1 ) ,timesteps.flatten(0 ,1 ) )
_lowerCamelCase : List[str] = scheduler.batch_step_no_noise(__lowerCAmelCase ,timesteps.flatten(0 ,1 ) ,samples.flatten(0 ,1 ) ,__lowerCAmelCase )
_lowerCamelCase : str = torch.sum(torch.abs(__lowerCAmelCase ) )
_lowerCamelCase : List[Any] = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 11_47.79_04 ) < 1e-2
assert abs(result_mean.item() - 0.49_82 ) < 1e-3
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : Any = self.full_loop()
_lowerCamelCase : Optional[Any] = torch.sum(torch.abs(__lowerCAmelCase ) )
_lowerCamelCase : int = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 1_72.00_67 ) < 1e-2
assert abs(result_mean.item() - 0.22_39_67 ) < 1e-3
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : str = self.full_loop(prediction_type="v_prediction" )
_lowerCamelCase : Optional[int] = torch.sum(torch.abs(__lowerCAmelCase ) )
_lowerCamelCase : List[str] = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 52.53_02 ) < 1e-2
assert abs(result_mean.item() - 0.06_84 ) < 1e-3
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : str = self.full_loop(set_alpha_to_one=__lowerCAmelCase ,beta_start=0.01 )
_lowerCamelCase : List[str] = torch.sum(torch.abs(__lowerCAmelCase ) )
_lowerCamelCase : Dict = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 1_49.82_95 ) < 1e-2
assert abs(result_mean.item() - 0.19_51 ) < 1e-3
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : List[str] = self.full_loop(set_alpha_to_one=__lowerCAmelCase ,beta_start=0.01 )
_lowerCamelCase : int = torch.sum(torch.abs(__lowerCAmelCase ) )
_lowerCamelCase : Union[str, Any] = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 1_49.07_84 ) < 1e-2
assert abs(result_mean.item() - 0.19_41 ) < 1e-3
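
# Illustrative sketch, not part of the original test file: the denoising loop
# that `full_loop` above exercises, written against the public diffusers API.
# The random tensor stands in for a real UNet prediction.
import torch
from diffusers import DDIMParallelScheduler

scheduler = DDIMParallelScheduler(num_train_timesteps=1_000, beta_schedule="linear")
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    model_output = torch.randn_like(sample)  # placeholder for unet(sample, t)
    sample = scheduler.step(model_output, t, sample, eta=0.0).prev_sample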
| 46 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class A_ ( _a ):
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : Any = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__lowerCAmelCase ,"hidden_sizes" ) )
self.parent.assertTrue(hasattr(__lowerCAmelCase ,"neck_hidden_sizes" ) )
self.parent.assertTrue(hasattr(__lowerCAmelCase ,"num_attention_heads" ) )
class A_ :
def __init__( self: List[str] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: List[Any]=13 ,__lowerCAmelCase: Any=32 ,__lowerCAmelCase: Optional[Any]=2 ,__lowerCAmelCase: Dict=3 ,__lowerCAmelCase: Any=640 ,__lowerCAmelCase: List[str]=4 ,__lowerCAmelCase: Dict="silu" ,__lowerCAmelCase: int=3 ,__lowerCAmelCase: str=32 ,__lowerCAmelCase: Dict=0.1 ,__lowerCAmelCase: Optional[Any]=0.1 ,__lowerCAmelCase: Optional[Any]=0.1 ,__lowerCAmelCase: List[str]=0.02 ,__lowerCAmelCase: int=True ,__lowerCAmelCase: int=True ,__lowerCAmelCase: str=10 ,__lowerCAmelCase: List[str]=None ,):
'''simple docstring'''
_lowerCamelCase : Optional[int] = parent
_lowerCamelCase : Dict = batch_size
_lowerCamelCase : int = image_size
_lowerCamelCase : int = patch_size
_lowerCamelCase : Dict = num_channels
_lowerCamelCase : Optional[Any] = last_hidden_size
_lowerCamelCase : Tuple = num_attention_heads
_lowerCamelCase : Any = hidden_act
_lowerCamelCase : Union[str, Any] = conv_kernel_size
_lowerCamelCase : Any = output_stride
_lowerCamelCase : Any = hidden_dropout_prob
_lowerCamelCase : Optional[int] = attention_probs_dropout_prob
_lowerCamelCase : Optional[Any] = classifier_dropout_prob
_lowerCamelCase : Union[str, Any] = use_labels
_lowerCamelCase : Dict = is_training
_lowerCamelCase : int = num_labels
_lowerCamelCase : str = initializer_range
_lowerCamelCase : Optional[int] = scope
def _lowercase ( self: int ):
'''simple docstring'''
_lowerCamelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase : Dict = None
_lowerCamelCase : Union[str, Any] = None
if self.use_labels:
_lowerCamelCase : Dict = ids_tensor([self.batch_size] ,self.num_labels )
_lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels )
_lowerCamelCase : Dict = self.get_config()
return config, pixel_values, labels, pixel_labels
def _lowercase ( self: str ):
'''simple docstring'''
return MobileViTConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,num_attention_heads=self.num_attention_heads ,hidden_act=self.hidden_act ,conv_kernel_size=self.conv_kernel_size ,output_stride=self.output_stride ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,classifier_dropout_prob=self.classifier_dropout_prob ,initializer_range=self.initializer_range ,)
def _lowercase ( self: Optional[int] ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: int ,__lowerCAmelCase: int ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = MobileViTModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Dict = model(__lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape ,(
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) ,)
def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: str ):
'''simple docstring'''
_lowerCamelCase : List[Any] = self.num_labels
_lowerCamelCase : str = MobileViTForImageClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : List[str] = model(__lowerCAmelCase ,labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _lowercase ( self: List[str] ,__lowerCAmelCase: int ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : Dict = self.num_labels
_lowerCamelCase : Tuple = MobileViTForSemanticSegmentation(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Optional[Any] = model(__lowerCAmelCase )
self.parent.assertEqual(
result.logits.shape ,(
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) ,)
_lowerCamelCase : Tuple = model(__lowerCAmelCase ,labels=__lowerCAmelCase )
self.parent.assertEqual(
result.logits.shape ,(
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) ,)
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : List[str] = self.prepare_config_and_inputs()
        _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase = config_and_inputs
_lowerCamelCase : Optional[int] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class A_ ( _a , _a , unittest.TestCase ):
lowerCAmelCase__ = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (
{
'feature-extraction': MobileViTModel,
'image-classification': MobileViTForImageClassification,
'image-segmentation': MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = MobileViTModelTester(self )
_lowerCamelCase : Optional[int] = MobileViTConfigTester(self ,config_class=__lowerCAmelCase ,has_text_modality=__lowerCAmelCase )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileViT does not use inputs_embeds" )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="MobileViT does not support input and output embeddings" )
def _lowercase ( self: Any ):
'''simple docstring'''
pass
@unittest.skip(reason="MobileViT does not output attentions" )
def _lowercase ( self: List[str] ):
'''simple docstring'''
pass
def _lowercase ( self: int ):
'''simple docstring'''
        _lowerCamelCase, _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Any = model_class(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : List[str] = [*signature.parameters.keys()]
_lowerCamelCase : Optional[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] ,__lowerCAmelCase )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
pass
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def _lowercase ( self: Tuple ):
'''simple docstring'''
def check_hidden_states_output(__lowerCAmelCase: int ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: int ):
_lowerCamelCase : str = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
with torch.no_grad():
_lowerCamelCase : int = model(**self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ) )
_lowerCamelCase : Dict = outputs.hidden_states
_lowerCamelCase : Optional[Any] = 5
self.assertEqual(len(__lowerCAmelCase ) ,__lowerCAmelCase )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
_lowerCamelCase : int = 2
for i in range(len(__lowerCAmelCase ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) ,[self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] ,)
divisor *= 2
self.assertEqual(self.model_tester.output_stride ,divisor // 2 )
        _lowerCamelCase, _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Optional[Any] = True
check_hidden_states_output(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCamelCase : Any = True
check_hidden_states_output(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase )
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__lowerCAmelCase )
@slow
def _lowercase ( self: List[Any] ):
'''simple docstring'''
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Union[str, Any] = MobileViTModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def lowerCamelCase_( ) -> str:
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class A_ ( unittest.TestCase ):
@cached_property
def _lowercase ( self: Any ):
'''simple docstring'''
return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small" ) if is_vision_available() else None
@slow
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : Tuple = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small" ).to(__lowerCAmelCase )
_lowerCamelCase : int = self.default_image_processor
_lowerCamelCase : Union[str, Any] = prepare_img()
_lowerCamelCase : List[Any] = image_processor(images=__lowerCAmelCase ,return_tensors="pt" ).to(__lowerCAmelCase )
# forward pass
with torch.no_grad():
_lowerCamelCase : Dict = model(**__lowerCAmelCase )
# verify the logits
_lowerCamelCase : str = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape ,__lowerCAmelCase )
_lowerCamelCase : Any = torch.tensor([-1.93_64, -1.23_27, -0.46_53] ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,__lowerCAmelCase ,atol=1e-4 ) )
@slow
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : str = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
_lowerCamelCase : Any = model.to(__lowerCAmelCase )
_lowerCamelCase : Tuple = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
_lowerCamelCase : Any = prepare_img()
_lowerCamelCase : int = image_processor(images=__lowerCAmelCase ,return_tensors="pt" ).to(__lowerCAmelCase )
# forward pass
with torch.no_grad():
_lowerCamelCase : List[str] = model(**__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = outputs.logits
# verify the logits
_lowerCamelCase : List[str] = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape ,__lowerCAmelCase )
_lowerCamelCase : Dict = torch.tensor(
[
[[6.97_13, 6.97_86, 7.24_22], [7.28_93, 7.28_25, 7.44_46], [7.65_80, 7.87_97, 7.94_20]],
[[-10.68_69, -10.32_50, -10.34_71], [-10.42_28, -9.98_68, -9.71_32], [-11.04_05, -11.02_21, -10.73_18]],
[[-3.30_89, -2.85_39, -2.67_40], [-3.27_06, -2.56_21, -2.51_08], [-3.25_34, -2.66_15, -2.66_51]],
] ,device=__lowerCAmelCase ,)
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] ,__lowerCAmelCase ,atol=1e-4 ) )
@slow
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : str = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
_lowerCamelCase : Optional[Any] = model.to(__lowerCAmelCase )
_lowerCamelCase : Dict = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
_lowerCamelCase : int = prepare_img()
_lowerCamelCase : Any = image_processor(images=__lowerCAmelCase ,return_tensors="pt" ).to(__lowerCAmelCase )
# forward pass
with torch.no_grad():
_lowerCamelCase : Optional[int] = model(**__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = outputs.logits.detach().cpu()
_lowerCamelCase : Dict = image_processor.post_process_semantic_segmentation(outputs=__lowerCAmelCase ,target_sizes=[(50, 60)] )
_lowerCamelCase : Optional[Any] = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape ,__lowerCAmelCase )
_lowerCamelCase : Any = image_processor.post_process_semantic_segmentation(outputs=__lowerCAmelCase )
_lowerCamelCase : List[str] = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape ,__lowerCAmelCase )
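
# Illustrative usage sketch, not part of the original test file: the inference
# path the integration tests above verify, run end to end on one image.
import torch
from PIL import Image
from transformers import MobileViTForImageClassification, MobileViTImageProcessor

processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small")
model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
with torch.no_grad():
    logits = model(**processor(images=image, return_tensors="pt")).logits
print(model.config.id2label[int(logits.argmax(-1))])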
| 46 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
_lowerCAmelCase : int = {
'''google/bit-50''': '''https://huggingface.co/google/bit-50/resolve/main/config.json''',
}
class A_ ( _a , _a ):
lowerCAmelCase__ = 'bit'
lowerCAmelCase__ = ['preactivation', 'bottleneck']
lowerCAmelCase__ = ['SAME', 'VALID']
def __init__( self: Tuple ,__lowerCAmelCase: List[Any]=3 ,__lowerCAmelCase: List[str]=64 ,__lowerCAmelCase: Union[str, Any]=[256, 512, 1_024, 2_048] ,__lowerCAmelCase: Optional[int]=[3, 4, 6, 3] ,__lowerCAmelCase: str="preactivation" ,__lowerCAmelCase: Tuple="relu" ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: Dict=32 ,__lowerCAmelCase: List[str]=0.0 ,__lowerCAmelCase: Optional[Any]=False ,__lowerCAmelCase: Dict=32 ,__lowerCAmelCase: Dict=1 ,__lowerCAmelCase: List[Any]=None ,__lowerCAmelCase: str=None ,**__lowerCAmelCase: Any ,):
'''simple docstring'''
super().__init__(**__lowerCAmelCase )
if layer_type not in self.layer_types:
raise ValueError(F"""layer_type={layer_type} is not one of {','.join(self.layer_types )}""" )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
_lowerCamelCase : List[Any] = global_padding.upper()
else:
raise ValueError(F"""Padding strategy {global_padding} not supported""" )
_lowerCamelCase : str = num_channels
_lowerCamelCase : str = embedding_size
_lowerCamelCase : Dict = hidden_sizes
_lowerCamelCase : str = depths
_lowerCamelCase : Any = layer_type
_lowerCamelCase : Any = hidden_act
_lowerCamelCase : List[str] = global_padding
_lowerCamelCase : Tuple = num_groups
_lowerCamelCase : Optional[int] = drop_path_rate
_lowerCamelCase : List[Any] = embedding_dynamic_padding
_lowerCamelCase : Any = output_stride
_lowerCamelCase : List[str] = width_factor
_lowerCamelCase : List[Any] = ["stem"] + [F"""stage{idx}""" for idx in range(1 ,len(__lowerCAmelCase ) + 1 )]
        _lowerCamelCase, _lowerCamelCase = get_aligned_output_features_output_indices(
out_features=__lowerCAmelCase ,out_indices=__lowerCAmelCase ,stage_names=self.stage_names )
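
# Illustrative sketch, not part of the original file: the class above is the
# public `BitConfig`; this shows the backbone-output alignment it performs.
from transformers import BitConfig

config = BitConfig(depths=[3, 4, 6, 3], out_features=["stage1", "stage4"])
print(config.stage_names)                       # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
print(config.out_features, config.out_indices)  # aligned by get_aligned_output_features_output_indices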
| 46 | 1 |
"""simple docstring"""
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class A_ ( nn.Module ):
def __init__( self: int ,__lowerCAmelCase: int ,__lowerCAmelCase: int ,__lowerCAmelCase: int ,__lowerCAmelCase: Optional[Any]=0.0 ,__lowerCAmelCase: Optional[int] = None ,__lowerCAmelCase: str = "geglu" ,__lowerCAmelCase: Optional[int] = None ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: str = "layer_norm" ,__lowerCAmelCase: bool = False ,):
'''simple docstring'''
super().__init__()
_lowerCamelCase : int = only_cross_attention
_lowerCamelCase : Tuple = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
_lowerCamelCase : Union[str, Any] = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
F"""`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"""
F""" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.""" )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
_lowerCamelCase : Tuple = AdaLayerNorm(__lowerCAmelCase ,__lowerCAmelCase )
elif self.use_ada_layer_norm_zero:
_lowerCamelCase : Union[str, Any] = AdaLayerNormZero(__lowerCAmelCase ,__lowerCAmelCase )
else:
_lowerCamelCase : Dict = nn.LayerNorm(__lowerCAmelCase ,elementwise_affine=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = Attention(
query_dim=__lowerCAmelCase ,heads=__lowerCAmelCase ,dim_head=__lowerCAmelCase ,dropout=__lowerCAmelCase ,bias=__lowerCAmelCase ,cross_attention_dim=cross_attention_dim if only_cross_attention else None ,upcast_attention=__lowerCAmelCase ,)
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
_lowerCamelCase : int = (
AdaLayerNorm(__lowerCAmelCase ,__lowerCAmelCase )
if self.use_ada_layer_norm
else nn.LayerNorm(__lowerCAmelCase ,elementwise_affine=__lowerCAmelCase )
)
_lowerCamelCase : Union[str, Any] = Attention(
query_dim=__lowerCAmelCase ,cross_attention_dim=cross_attention_dim if not double_self_attention else None ,heads=__lowerCAmelCase ,dim_head=__lowerCAmelCase ,dropout=__lowerCAmelCase ,bias=__lowerCAmelCase ,upcast_attention=__lowerCAmelCase ,) # is self-attn if encoder_hidden_states is none
else:
_lowerCamelCase : Tuple = None
_lowerCamelCase : Any = None
# 3. Feed-forward
_lowerCamelCase : Tuple = nn.LayerNorm(__lowerCAmelCase ,elementwise_affine=__lowerCAmelCase )
_lowerCamelCase : Any = FeedForward(__lowerCAmelCase ,dropout=__lowerCAmelCase ,activation_fn=__lowerCAmelCase ,final_dropout=__lowerCAmelCase )
# let chunk size default to None
_lowerCamelCase : int = None
_lowerCamelCase : Union[str, Any] = 0
def _lowercase ( self: int ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: int ):
'''simple docstring'''
_lowerCamelCase : Any = chunk_size
_lowerCamelCase : Dict = dim
def _lowercase ( self: Tuple ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: Optional[torch.FloatTensor] = None ,__lowerCAmelCase: Optional[torch.FloatTensor] = None ,__lowerCAmelCase: Optional[torch.FloatTensor] = None ,__lowerCAmelCase: Optional[torch.LongTensor] = None ,__lowerCAmelCase: Dict[str, Any] = None ,__lowerCAmelCase: Optional[torch.LongTensor] = None ,):
'''simple docstring'''
if self.use_ada_layer_norm:
_lowerCamelCase : List[str] = self.norma(__lowerCAmelCase ,__lowerCAmelCase )
elif self.use_ada_layer_norm_zero:
            _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase = self.norma(
__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,hidden_dtype=hidden_states.dtype )
else:
_lowerCamelCase : List[str] = self.norma(__lowerCAmelCase )
_lowerCamelCase : Any = cross_attention_kwargs if cross_attention_kwargs is not None else {}
_lowerCamelCase : str = self.attna(
__lowerCAmelCase ,encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None ,attention_mask=__lowerCAmelCase ,**__lowerCAmelCase ,)
if self.use_ada_layer_norm_zero:
_lowerCamelCase : List[Any] = gate_msa.unsqueeze(1 ) * attn_output
_lowerCamelCase : str = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
_lowerCamelCase : Optional[int] = (
self.norma(__lowerCAmelCase ,__lowerCAmelCase ) if self.use_ada_layer_norm else self.norma(__lowerCAmelCase )
)
_lowerCamelCase : Optional[int] = self.attna(
__lowerCAmelCase ,encoder_hidden_states=__lowerCAmelCase ,attention_mask=__lowerCAmelCase ,**__lowerCAmelCase ,)
_lowerCamelCase : List[Any] = attn_output + hidden_states
# 3. Feed-forward
_lowerCamelCase : int = self.norma(__lowerCAmelCase )
if self.use_ada_layer_norm_zero:
_lowerCamelCase : Optional[Any] = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
F"""`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.""" )
_lowerCamelCase : List[Any] = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
_lowerCamelCase : Union[str, Any] = torch.cat(
[self.ff(__lowerCAmelCase ) for hid_slice in norm_hidden_states.chunk(__lowerCAmelCase ,dim=self._chunk_dim )] ,dim=self._chunk_dim ,)
else:
_lowerCamelCase : Any = self.ff(__lowerCAmelCase )
if self.use_ada_layer_norm_zero:
_lowerCamelCase : Optional[int] = gate_mlp.unsqueeze(1 ) * ff_output
_lowerCamelCase : int = ff_output + hidden_states
return hidden_states
class A_ ( nn.Module ):
def __init__( self: Any ,__lowerCAmelCase: int ,__lowerCAmelCase: Optional[int] = None ,__lowerCAmelCase: int = 4 ,__lowerCAmelCase: float = 0.0 ,__lowerCAmelCase: str = "geglu" ,__lowerCAmelCase: bool = False ,):
'''simple docstring'''
super().__init__()
_lowerCamelCase : Dict = int(dim * mult )
_lowerCamelCase : str = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
_lowerCamelCase : str = GELU(__lowerCAmelCase ,__lowerCAmelCase )
if activation_fn == "gelu-approximate":
_lowerCamelCase : Any = GELU(__lowerCAmelCase ,__lowerCAmelCase ,approximate="tanh" )
elif activation_fn == "geglu":
_lowerCamelCase : Union[str, Any] = GEGLU(__lowerCAmelCase ,__lowerCAmelCase )
elif activation_fn == "geglu-approximate":
_lowerCamelCase : Optional[int] = ApproximateGELU(__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = nn.ModuleList([] )
# project in
self.net.append(__lowerCAmelCase )
# project dropout
self.net.append(nn.Dropout(__lowerCAmelCase ) )
# project out
self.net.append(nn.Linear(__lowerCAmelCase ,__lowerCAmelCase ) )
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
if final_dropout:
self.net.append(nn.Dropout(__lowerCAmelCase ) )
def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: int ):
'''simple docstring'''
for module in self.net:
_lowerCamelCase : int = module(__lowerCAmelCase )
return hidden_states
class A_ ( nn.Module ):
def __init__( self: Any ,__lowerCAmelCase: int ,__lowerCAmelCase: int ,__lowerCAmelCase: str = "none" ):
'''simple docstring'''
super().__init__()
_lowerCamelCase : Dict = nn.Linear(__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : int = approximate
def _lowercase ( self: Optional[int] ,__lowerCAmelCase: List[Any] ):
'''simple docstring'''
if gate.device.type != "mps":
return F.gelu(__lowerCAmelCase ,approximate=self.approximate )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) ,approximate=self.approximate ).to(dtype=gate.dtype )
def _lowercase ( self: Dict ,__lowerCAmelCase: Dict ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = self.proj(__lowerCAmelCase )
_lowerCamelCase : List[Any] = self.gelu(__lowerCAmelCase )
return hidden_states
class A_ ( nn.Module ):
def __init__( self: str ,__lowerCAmelCase: int ,__lowerCAmelCase: int ):
'''simple docstring'''
super().__init__()
_lowerCamelCase : Dict = nn.Linear(__lowerCAmelCase ,dim_out * 2 )
def _lowercase ( self: List[str] ,__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
if gate.device.type != "mps":
return F.gelu(__lowerCAmelCase )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype )
def _lowercase ( self: Optional[int] ,__lowerCAmelCase: str ):
'''simple docstring'''
        _lowerCamelCase, _lowerCamelCase = self.proj(__lowerCAmelCase ).chunk(2 ,dim=-1 )
return hidden_states * self.gelu(__lowerCAmelCase )
class A_ ( nn.Module ):
def __init__( self: Tuple ,__lowerCAmelCase: int ,__lowerCAmelCase: int ):
'''simple docstring'''
super().__init__()
_lowerCamelCase : Optional[Any] = nn.Linear(__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: List[Any] ,__lowerCAmelCase: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = self.proj(__lowerCAmelCase )
return x * torch.sigmoid(1.7_02 * x )
class A_ ( nn.Module ):
def __init__( self: Optional[int] ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: Tuple ):
'''simple docstring'''
super().__init__()
_lowerCamelCase : Dict = nn.Embedding(__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : Dict = nn.SiLU()
_lowerCamelCase : Dict = nn.Linear(__lowerCAmelCase ,embedding_dim * 2 )
_lowerCamelCase : Dict = nn.LayerNorm(__lowerCAmelCase ,elementwise_affine=__lowerCAmelCase )
def _lowercase ( self: Dict ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: Dict ):
'''simple docstring'''
_lowerCamelCase : List[Any] = self.linear(self.silu(self.emb(__lowerCAmelCase ) ) )
        _lowerCamelCase, _lowerCamelCase = torch.chunk(__lowerCAmelCase ,2 )
_lowerCamelCase : Tuple = self.norm(__lowerCAmelCase ) * (1 + scale) + shift
return x
class A_ ( nn.Module ):
def __init__( self: Tuple ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: str ):
'''simple docstring'''
super().__init__()
_lowerCamelCase : Optional[Any] = CombinedTimestepLabelEmbeddings(__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : List[Any] = nn.SiLU()
_lowerCamelCase : Union[str, Any] = nn.Linear(__lowerCAmelCase ,6 * embedding_dim ,bias=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = nn.LayerNorm(__lowerCAmelCase ,elementwise_affine=__lowerCAmelCase ,eps=1e-6 )
def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: Tuple=None ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = self.linear(self.silu(self.emb(__lowerCAmelCase ,__lowerCAmelCase ,hidden_dtype=__lowerCAmelCase ) ) )
        _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase = emb.chunk(6 ,dim=1 )
_lowerCamelCase : int = self.norm(__lowerCAmelCase ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class A_ ( nn.Module ):
def __init__( self: List[str] ,__lowerCAmelCase: int ,__lowerCAmelCase: int ,__lowerCAmelCase: int ,__lowerCAmelCase: Optional[str] = None ,__lowerCAmelCase: float = 1e-5 ):
'''simple docstring'''
super().__init__()
_lowerCamelCase : str = num_groups
_lowerCamelCase : int = eps
if act_fn is None:
_lowerCamelCase : List[str] = None
else:
_lowerCamelCase : List[str] = get_activation(__lowerCAmelCase )
_lowerCamelCase : Dict = nn.Linear(__lowerCAmelCase ,out_dim * 2 )
def _lowercase ( self: Optional[int] ,__lowerCAmelCase: Any ,__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
if self.act:
_lowerCamelCase : Optional[Any] = self.act(__lowerCAmelCase )
_lowerCamelCase : int = self.linear(__lowerCAmelCase )
_lowerCamelCase : int = emb[:, :, None, None]
        _lowerCamelCase, _lowerCamelCase = emb.chunk(2 ,dim=1 )
_lowerCamelCase : List[str] = F.group_norm(__lowerCAmelCase ,self.num_groups ,eps=self.eps )
_lowerCamelCase : List[str] = x * (1 + scale) + shift
return x
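
# Illustrative sketch, not part of the original file: the GEGLU gating used by
# the feed-forward block above, written with plain torch so the chunk-and-gate
# mechanism is explicit.
import torch
import torch.nn.functional as F

proj = torch.nn.Linear(16, 2 * 32)            # project to twice the output width
hidden = torch.randn(1, 4, 16)
states, gate = proj(hidden).chunk(2, dim=-1)  # split into value and gate halves
out = states * F.gelu(gate)                   # gate the values with GELU(gate)
assert out.shape == (1, 4, 32)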
| 46 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : List[str] = {
'''google/vivit-b-16x2-kinetics400''': (
'''https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'''
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class A_ ( _a ):
lowerCAmelCase__ = 'vivit'
def __init__( self: List[Any] ,__lowerCAmelCase: int=224 ,__lowerCAmelCase: Any=32 ,__lowerCAmelCase: str=[2, 16, 16] ,__lowerCAmelCase: Optional[Any]=3 ,__lowerCAmelCase: List[str]=768 ,__lowerCAmelCase: List[Any]=12 ,__lowerCAmelCase: Optional[int]=12 ,__lowerCAmelCase: Optional[Any]=3_072 ,__lowerCAmelCase: Any="gelu_fast" ,__lowerCAmelCase: Tuple=0.0 ,__lowerCAmelCase: Any=0.0 ,__lowerCAmelCase: Union[str, Any]=0.02 ,__lowerCAmelCase: List[str]=1e-06 ,__lowerCAmelCase: Optional[Any]=True ,**__lowerCAmelCase: Optional[int] ,):
'''simple docstring'''
_lowerCamelCase : Any = hidden_size
_lowerCamelCase : List[str] = num_hidden_layers
_lowerCamelCase : Union[str, Any] = num_attention_heads
_lowerCamelCase : Any = intermediate_size
_lowerCamelCase : Tuple = hidden_act
_lowerCamelCase : Tuple = hidden_dropout_prob
_lowerCamelCase : Optional[Any] = attention_probs_dropout_prob
_lowerCamelCase : List[Any] = initializer_range
_lowerCamelCase : int = layer_norm_eps
_lowerCamelCase : Tuple = image_size
_lowerCamelCase : Dict = num_frames
_lowerCamelCase : Optional[int] = tubelet_size
_lowerCamelCase : int = num_channels
_lowerCamelCase : List[str] = qkv_bias
super().__init__(**__lowerCAmelCase )
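
# Illustrative sketch, not part of the original file: the config above under its
# public name, with the video-specific fields that distinguish ViViT from ViT.
from transformers import VivitConfig

config = VivitConfig(image_size=224, num_frames=32, tubelet_size=[2, 16, 16])
# tokens per video = (32 / 2) * (224 / 16) * (224 / 16) = 3_136 tubelet embeddings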
| 46 | 1 |
"""simple docstring"""
_lowerCAmelCase : Any = '''Alexander Joslin'''
import operator as op
from .stack import Stack
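# The RULE comments in the function below refer to Dijkstra's two-stack rules:
#   1. push operands onto the operand stack
#   2. push operators onto the operator stack
#   3. ignore left parentheses
#   4. on a right parenthesis, pop one operator and two operands, apply, push the result
#   5. when the input is exhausted, the operand stack holds the final value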
def lowerCamelCase_( _lowerCamelCase ) -> int:
'''simple docstring'''
_lowerCamelCase : int = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
_lowerCamelCase : Stack[int] = Stack()
_lowerCamelCase : Stack[str] = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(_lowerCamelCase ) )
elif i in operators:
# RULE 2
operator_stack.push(_lowerCamelCase )
elif i == ")":
# RULE 4
_lowerCamelCase : List[Any] = operator_stack.peek()
operator_stack.pop()
_lowerCamelCase : Union[str, Any] = operand_stack.peek()
operand_stack.pop()
_lowerCamelCase : Tuple = operand_stack.peek()
operand_stack.pop()
_lowerCamelCase : List[str] = operators[opr](_lowerCamelCase , _lowerCamelCase )
operand_stack.push(_lowerCamelCase )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
_lowerCAmelCase : Optional[Any] = '''(5 + ((4 * 2) * (2 + 3)))'''
# answer = 45
print(f'''{equation} = {dijkstras_two_stack_algorithm(equation)}''')
| 46 |
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ ( _a , unittest.TestCase ):
lowerCAmelCase__ = MgpstrTokenizer
lowerCAmelCase__ = False
lowerCAmelCase__ = {}
lowerCAmelCase__ = False
def _lowercase ( self: int ):
'''simple docstring'''
super().setUp()
# fmt: off
_lowerCamelCase : List[Any] = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
# fmt: on
_lowerCamelCase : Optional[Any] = dict(zip(__lowerCAmelCase ,range(len(__lowerCAmelCase ) ) ) )
_lowerCamelCase : List[str] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file ,"w" ,encoding="utf-8" ) as fp:
fp.write(json.dumps(__lowerCAmelCase ) + "\n" )
def _lowercase ( self: List[str] ,**__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname ,**__lowerCAmelCase )
def _lowercase ( self: List[Any] ,__lowerCAmelCase: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : List[Any] = "tester"
_lowerCamelCase : Optional[Any] = "tester"
return input_text, output_text
@unittest.skip("MGP-STR always lower cases letters." )
def _lowercase ( self: Any ):
'''simple docstring'''
pass
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase : List[str] = self.get_tokenizers(do_lower_case=__lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_lowerCamelCase : Tuple = "[SPECIAL_TOKEN]"
tokenizer.add_special_tokens({"cls_token": special_token} )
_lowerCamelCase : Optional[Any] = tokenizer.encode([special_token] ,add_special_tokens=__lowerCAmelCase )
self.assertEqual(len(__lowerCAmelCase ) ,1 )
_lowerCamelCase : int = tokenizer.decode(__lowerCAmelCase ,skip_special_tokens=__lowerCAmelCase )
self.assertTrue(special_token not in decoded )
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase : List[str] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
                _lowerCamelCase, _lowerCamelCase = self.get_input_output_texts(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = tokenizer.tokenize(__lowerCAmelCase )
_lowerCamelCase : int = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
_lowerCamelCase : List[Any] = tokenizer.encode(__lowerCAmelCase ,add_special_tokens=__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : Dict = tokenizer.convert_ids_to_tokens(__lowerCAmelCase )
self.assertNotEqual(len(__lowerCAmelCase ) ,0 )
_lowerCamelCase : Optional[int] = tokenizer.decode(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase ,__lowerCAmelCase )
self.assertEqual(text_a.replace(" " ,"" ) ,__lowerCAmelCase )
@unittest.skip("MGP-STR tokenizer only handles one sequence." )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" )
def _lowercase ( self: str ):
'''simple docstring'''
pass
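
# Illustrative usage sketch, not part of the original test file. MGP-STR is a
# character-level scene-text tokenizer; the checkpoint name below is an
# assumption, not taken from this file.
from transformers import MgpstrTokenizer

tokenizer = MgpstrTokenizer.from_pretrained("alibaba-damo/mgp-str-base")
print(tokenizer("tester")["input_ids"])  # typically one id per lowercase character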
| 46 | 1 |
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def lowerCamelCase_( _lowerCamelCase ) -> Union[str, Any]:
'''simple docstring'''
    _lowerCamelCase : int = filter(lambda _lowerCamelCase : _lowerCamelCase.requires_grad , model.parameters() )
_lowerCamelCase : Optional[int] = sum([np.prod(p.size() ) for p in model_parameters] )
return params
_lowerCAmelCase : Union[str, Any] = logging.getLogger(__name__)
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> List[Any]:
'''simple docstring'''
if metric == "rouge2":
_lowerCamelCase : Optional[int] = "{val_avg_rouge2:.4f}-{step_count}"
elif metric == "bleu":
_lowerCamelCase : Optional[int] = "{val_avg_bleu:.4f}-{step_count}"
elif metric == "em":
_lowerCamelCase : Dict = "{val_avg_em:.4f}-{step_count}"
elif metric == "loss":
_lowerCamelCase : List[Any] = "{val_avg_loss:.4f}-{step_count}"
else:
raise NotImplementedError(
F"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"""
" function." )
_lowerCamelCase : Dict = ModelCheckpoint(
dirpath=_lowerCamelCase , filename=_lowerCamelCase , monitor=F"""val_{metric}""" , mode="max" , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Any:
'''simple docstring'''
return EarlyStopping(
monitor=F"""val_{metric}""" , mode="min" if "loss" in metric else "max" , patience=_lowerCamelCase , verbose=_lowerCamelCase , )
class A_ ( pl.Callback ):
def _lowercase ( self: Optional[int] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: List[Any] ):
'''simple docstring'''
_lowerCamelCase : List[Any] = {F"""lr_group_{i}""": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(__lowerCAmelCase )
@rank_zero_only
def _lowercase ( self: List[Any] ,__lowerCAmelCase: pl.Trainer ,__lowerCAmelCase: pl.LightningModule ,__lowerCAmelCase: str ,__lowerCAmelCase: Tuple=True ):
'''simple docstring'''
logger.info(F"""***** {type_path} results at step {trainer.global_step:05d} *****""" )
_lowerCamelCase : Dict = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} )
# Log results
_lowerCamelCase : Optional[Any] = Path(pl_module.hparams.output_dir )
if type_path == "test":
_lowerCamelCase : Optional[int] = od / "test_results.txt"
_lowerCamelCase : List[Any] = od / "test_generations.txt"
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
_lowerCamelCase : str = od / F"""{type_path}_results/{trainer.global_step:05d}.txt"""
_lowerCamelCase : Optional[int] = od / F"""{type_path}_generations/{trainer.global_step:05d}.txt"""
results_file.parent.mkdir(exist_ok=__lowerCAmelCase )
generations_file.parent.mkdir(exist_ok=__lowerCAmelCase )
with open(__lowerCAmelCase ,"a+" ) as writer:
for key in sorted(__lowerCAmelCase ):
if key in ["log", "progress_bar", "preds"]:
continue
_lowerCamelCase : Union[str, Any] = metrics[key]
if isinstance(__lowerCAmelCase ,torch.Tensor ):
_lowerCamelCase : Union[str, Any] = val.item()
_lowerCamelCase : Tuple = F"""{key}: {val:.6f}\n"""
writer.write(__lowerCAmelCase )
if not save_generations:
return
if "preds" in metrics:
_lowerCamelCase : List[str] = "\n".join(metrics["preds"] )
generations_file.open("w+" ).write(__lowerCAmelCase )
@rank_zero_only
def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: str ,__lowerCAmelCase: Any ):
'''simple docstring'''
try:
_lowerCamelCase : Optional[int] = pl_module.model.model.num_parameters()
except AttributeError:
_lowerCamelCase : Optional[int] = pl_module.model.num_parameters()
_lowerCamelCase : Dict = count_trainable_parameters(__lowerCAmelCase )
# mp stands for million parameters
trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6} )
@rank_zero_only
def _lowercase ( self: Optional[int] ,__lowerCAmelCase: pl.Trainer ,__lowerCAmelCase: pl.LightningModule ):
'''simple docstring'''
save_json(pl_module.metrics ,pl_module.metrics_save_path )
return self._write_logs(__lowerCAmelCase ,__lowerCAmelCase ,"test" )
@rank_zero_only
def _lowercase ( self: List[str] ,__lowerCAmelCase: pl.Trainer ,__lowerCAmelCase: int ):
'''simple docstring'''
save_json(pl_module.metrics ,pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 46 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
_lowerCAmelCase : str = '''
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior.to("cuda")
>>> prompt = "A red cartoon frog, 4k"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
... )
>>> pipe.to("cuda")
>>> init_image = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/frog.png"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save("red_frog.png")
```
'''
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=8 ) -> Tuple:
'''simple docstring'''
_lowerCamelCase : int = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
_lowerCamelCase : Optional[Any] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
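# A hedged sketch of what the helper above computes (the function name below is
# illustrative, not part of this pipeline): both dimensions are ceil-rounded so
# that dividing by `scale_factor` leaves an integer latent grid, e.g.
# (768, 768) -> (96, 96) with the default scale_factor=8.
def _downscale_demo(height: int, width: int, scale_factor: int = 8) -> tuple[int, int]:
    import math

    new_height = math.ceil(height / scale_factor**2) * scale_factor
    new_width = math.ceil(width / scale_factor**2) * scale_factor
    return new_height, new_width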
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=512 , _lowerCamelCase=512 ) -> int:
'''simple docstring'''
_lowerCamelCase : int = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
_lowerCamelCase : Union[str, Any] = np.array(pil_image.convert("RGB" ) )
_lowerCamelCase : Any = arr.astype(np.floataa ) / 1_2_7.5 - 1
_lowerCamelCase : Optional[Any] = np.transpose(_lowerCamelCase , [2, 0, 1] )
_lowerCamelCase : Any = torch.from_numpy(_lowerCamelCase ).unsqueeze(0 )
return image
class A_ ( _a ):
def __init__( self: Any ,__lowerCAmelCase: UNetaDConditionModel ,__lowerCAmelCase: DDPMScheduler ,__lowerCAmelCase: VQModel ,):
'''simple docstring'''
super().__init__()
self.register_modules(
unet=__lowerCAmelCase ,scheduler=__lowerCAmelCase ,movq=__lowerCAmelCase ,)
_lowerCamelCase : List[str] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def _lowercase ( self: Dict ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: Tuple ):
'''simple docstring'''
_lowerCamelCase : int = min(int(num_inference_steps * strength ) ,__lowerCAmelCase )
_lowerCamelCase : Tuple = max(num_inference_steps - init_timestep ,0 )
_lowerCamelCase : Optional[int] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
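    # Worked example for the method above (numbers illustrative): with
    # num_inference_steps=100 and strength=0.2, init_timestep = min(20, 100) = 20
    # and t_start = 80, so only the final 20 scheduler timesteps are run; lower
    # `strength` preserves more of the input image.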
def _lowercase ( self: Optional[int] ,__lowerCAmelCase: Any ,__lowerCAmelCase: Any ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: List[str]=None ):
'''simple docstring'''
if not isinstance(__lowerCAmelCase ,(torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(__lowerCAmelCase )}""" )
_lowerCamelCase : Any = image.to(device=__lowerCAmelCase ,dtype=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = batch_size * num_images_per_prompt
if image.shape[1] == 4:
_lowerCamelCase : List[Any] = image
else:
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ) and len(__lowerCAmelCase ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(__lowerCAmelCase )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
elif isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : List[Any] = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(__lowerCAmelCase )
]
_lowerCamelCase : Tuple = torch.cat(__lowerCAmelCase ,dim=0 )
else:
_lowerCamelCase : int = self.movq.encode(__lowerCAmelCase ).latent_dist.sample(__lowerCAmelCase )
_lowerCamelCase : int = self.movq.config.scaling_factor * init_latents
_lowerCamelCase : Tuple = torch.cat([init_latents] ,dim=0 )
_lowerCamelCase : Optional[int] = init_latents.shape
_lowerCamelCase : int = randn_tensor(__lowerCAmelCase ,generator=__lowerCAmelCase ,device=__lowerCAmelCase ,dtype=__lowerCAmelCase )
# get latents
_lowerCamelCase : Union[str, Any] = self.scheduler.add_noise(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : str = init_latents
return latents
def _lowercase ( self: List[Any] ,__lowerCAmelCase: Optional[int]=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
_lowerCamelCase : str = torch.device(F"""cuda:{gpu_id}""" )
_lowerCamelCase : Dict = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: List[Any] ,__lowerCAmelCase: int=0 ):
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version(">=" ,"0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
_lowerCamelCase : List[str] = torch.device(F"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to("cpu" ,silence_dtype_warnings=__lowerCAmelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_lowerCamelCase : str = None
for cpu_offloaded_model in [self.unet, self.movq]:
            _lowerCamelCase, _lowerCamelCase = cpu_offload_with_hook(__lowerCAmelCase ,__lowerCAmelCase ,prev_module_hook=__lowerCAmelCase )
# We'll offload the last model manually.
_lowerCamelCase : int = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
if not hasattr(self.unet ,"_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(__lowerCAmelCase ,"_hf_hook" )
and hasattr(module._hf_hook ,"execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(__lowerCAmelCase )
def __call__( self: Dict ,__lowerCAmelCase: Union[torch.FloatTensor, List[torch.FloatTensor]] ,__lowerCAmelCase: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] ,__lowerCAmelCase: Union[torch.FloatTensor, List[torch.FloatTensor]] ,__lowerCAmelCase: int = 512 ,__lowerCAmelCase: int = 512 ,__lowerCAmelCase: int = 100 ,__lowerCAmelCase: float = 4.0 ,__lowerCAmelCase: float = 0.3 ,__lowerCAmelCase: int = 1 ,__lowerCAmelCase: Optional[Union[torch.Generator, List[torch.Generator]]] = None ,__lowerCAmelCase: Optional[str] = "pil" ,__lowerCAmelCase: bool = True ,):
'''simple docstring'''
_lowerCamelCase : Optional[int] = self._execution_device
_lowerCamelCase : Dict = guidance_scale > 1.0
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : int = torch.cat(__lowerCAmelCase ,dim=0 )
_lowerCamelCase : Any = image_embeds.shape[0]
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : str = torch.cat(__lowerCAmelCase ,dim=0 )
if do_classifier_free_guidance:
_lowerCamelCase : List[str] = image_embeds.repeat_interleave(__lowerCAmelCase ,dim=0 )
_lowerCamelCase : Optional[int] = negative_image_embeds.repeat_interleave(__lowerCAmelCase ,dim=0 )
_lowerCamelCase : Optional[Any] = torch.cat([negative_image_embeds, image_embeds] ,dim=0 ).to(dtype=self.unet.dtype ,device=__lowerCAmelCase )
if not isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : Tuple = [image]
if not all(isinstance(__lowerCAmelCase ,(PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
F"""Input is in incorrect format: {[type(__lowerCAmelCase ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
_lowerCamelCase : Union[str, Any] = torch.cat([prepare_image(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ) for i in image] ,dim=0 )
_lowerCamelCase : str = image.to(dtype=image_embeds.dtype ,device=__lowerCAmelCase )
_lowerCamelCase : Tuple = self.movq.encode(__lowerCAmelCase )["latents"]
_lowerCamelCase : List[str] = latents.repeat_interleave(__lowerCAmelCase ,dim=0 )
self.scheduler.set_timesteps(__lowerCAmelCase ,device=__lowerCAmelCase )
        _lowerCamelCase, _lowerCamelCase = self.get_timesteps(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : Any = timesteps[:1].repeat(batch_size * num_images_per_prompt )
        _lowerCamelCase, _lowerCamelCase = downscale_height_and_width(__lowerCAmelCase ,__lowerCAmelCase ,self.movq_scale_factor )
_lowerCamelCase : List[Any] = self.prepare_latents(
__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,image_embeds.dtype ,__lowerCAmelCase ,__lowerCAmelCase )
for i, t in enumerate(self.progress_bar(__lowerCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
_lowerCamelCase : Union[str, Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCamelCase : List[str] = {"image_embeds": image_embeds}
_lowerCamelCase : Tuple = self.unet(
sample=__lowerCAmelCase ,timestep=__lowerCAmelCase ,encoder_hidden_states=__lowerCAmelCase ,added_cond_kwargs=__lowerCAmelCase ,return_dict=__lowerCAmelCase ,)[0]
if do_classifier_free_guidance:
                _lowerCamelCase, _lowerCamelCase = noise_pred.split(latents.shape[1] ,dim=1 )
                _lowerCamelCase, _lowerCamelCase = noise_pred.chunk(2 )
                _lowerCamelCase, _lowerCamelCase = variance_pred.chunk(2 )
_lowerCamelCase : Tuple = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_lowerCamelCase : Any = torch.cat([noise_pred, variance_pred_text] ,dim=1 )
if not (
hasattr(self.scheduler.config ,"variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
                _lowerCamelCase, _lowerCamelCase = noise_pred.split(latents.shape[1] ,dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_lowerCamelCase : Optional[int] = self.scheduler.step(
__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,generator=__lowerCAmelCase ,)[0]
# post-processing
_lowerCamelCase : Optional[int] = self.movq.decode(__lowerCAmelCase ,force_not_quantize=__lowerCAmelCase )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
_lowerCamelCase : Optional[int] = image * 0.5 + 0.5
_lowerCamelCase : str = image.clamp(0 ,1 )
_lowerCamelCase : Optional[int] = image.cpu().permute(0 ,2 ,3 ,1 ).float().numpy()
if output_type == "pil":
_lowerCamelCase : str = self.numpy_to_pil(__lowerCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__lowerCAmelCase )
| 46 | 1 |
"""simple docstring"""
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
_lowerCAmelCase : Tuple = logging.getLogger()
def lowerCamelCase_( ) -> List[Any]:
'''simple docstring'''
_lowerCamelCase : List[str] = argparse.ArgumentParser()
parser.add_argument("-f" )
_lowerCamelCase : Tuple = parser.parse_args()
return args.f
def lowerCamelCase_( _lowerCamelCase ) -> List[str]:
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = {}
_lowerCamelCase : List[Any] = os.path.join(_lowerCamelCase , "all_results.json" )
if os.path.exists(_lowerCamelCase ):
with open(_lowerCamelCase , "r" ) as f:
_lowerCamelCase : Dict = json.load(_lowerCamelCase )
else:
raise ValueError(F"""can't find {path}""" )
return results
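# The helper above expects each example script to have written an
# "all_results.json" into its output directory; a hypothetical payload (keys
# are illustrative, echoing the assertions further down) looks like:
#
#   {"eval_accuracy": 0.78, "train_loss": 0.42, "perplexity": 35.1}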
def lowerCamelCase_( ) -> List[str]:
'''simple docstring'''
_lowerCamelCase : Optional[int] = torch.cuda.is_available() and torch_device == "cuda"
return is_using_cuda and is_apex_available()
_lowerCAmelCase : Tuple = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class A_ ( _a ):
@classmethod
def _lowercase ( cls: List[str] ):
'''simple docstring'''
_lowerCamelCase : Any = tempfile.mkdtemp()
_lowerCamelCase : List[Any] = os.path.join(cls.tmpdir ,"default_config.yml" )
write_basic_config(save_location=cls.configPath )
_lowerCamelCase : Optional[Any] = ["accelerate", "launch", "--config_file", cls.configPath]
@classmethod
def _lowercase ( cls: Union[str, Any] ):
'''simple docstring'''
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ ,{"WANDB_MODE": "offline"} )
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : Any = self.get_auto_remove_tmp_dir()
_lowerCamelCase : Union[str, Any] = F"""
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
""".split()
if is_cuda_and_apex_available():
testargs.append("--fp16" )
run_command(self._launch_args + testargs )
_lowerCamelCase : Tuple = get_results(__lowerCAmelCase )
self.assertGreaterEqual(result["eval_accuracy"] ,0.75 )
self.assertTrue(os.path.exists(os.path.join(__lowerCAmelCase ,"epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(__lowerCAmelCase ,"glue_no_trainer" ) ) )
@mock.patch.dict(os.environ ,{"WANDB_MODE": "offline"} )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = self.get_auto_remove_tmp_dir()
_lowerCamelCase : Optional[Any] = F"""
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
""".split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
_lowerCamelCase : Union[str, Any] = get_results(__lowerCAmelCase )
self.assertLess(result["perplexity"] ,100 )
self.assertTrue(os.path.exists(os.path.join(__lowerCAmelCase ,"epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(__lowerCAmelCase ,"clm_no_trainer" ) ) )
@mock.patch.dict(os.environ ,{"WANDB_MODE": "offline"} )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : List[str] = self.get_auto_remove_tmp_dir()
_lowerCamelCase : str = F"""
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
_lowerCamelCase : Optional[int] = get_results(__lowerCAmelCase )
self.assertLess(result["perplexity"] ,42 )
self.assertTrue(os.path.exists(os.path.join(__lowerCAmelCase ,"epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(__lowerCAmelCase ,"mlm_no_trainer" ) ) )
@mock.patch.dict(os.environ ,{"WANDB_MODE": "offline"} )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = 7 if get_gpu_count() > 1 else 2
_lowerCamelCase : Any = self.get_auto_remove_tmp_dir()
_lowerCamelCase : Union[str, Any] = F"""
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
_lowerCamelCase : Optional[int] = get_results(__lowerCAmelCase )
self.assertGreaterEqual(result["eval_accuracy"] ,0.75 )
self.assertLess(result["train_loss"] ,0.5 )
self.assertTrue(os.path.exists(os.path.join(__lowerCAmelCase ,"epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(__lowerCAmelCase ,"ner_no_trainer" ) ) )
@unittest.skip(reason="Fix me @muellerzr" )
@mock.patch.dict(os.environ ,{"WANDB_MODE": "offline"} )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = self.get_auto_remove_tmp_dir()
_lowerCamelCase : Any = F"""
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
_lowerCamelCase : int = get_results(__lowerCAmelCase )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result["eval_f1"] ,28 )
self.assertGreaterEqual(result["eval_exact"] ,28 )
self.assertTrue(os.path.exists(os.path.join(__lowerCAmelCase ,"epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(__lowerCAmelCase ,"qa_no_trainer" ) ) )
@mock.patch.dict(os.environ ,{"WANDB_MODE": "offline"} )
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : Any = self.get_auto_remove_tmp_dir()
_lowerCamelCase : Optional[Any] = F"""
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
""".split()
run_command(self._launch_args + testargs )
_lowerCamelCase : str = get_results(__lowerCAmelCase )
self.assertGreaterEqual(result["eval_accuracy"] ,0.8 )
self.assertTrue(os.path.exists(os.path.join(__lowerCAmelCase ,"swag_no_trainer" ) ) )
@slow
@mock.patch.dict(os.environ ,{"WANDB_MODE": "offline"} )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : str = self.get_auto_remove_tmp_dir()
_lowerCamelCase : Optional[Any] = F"""
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
_lowerCamelCase : Dict = get_results(__lowerCAmelCase )
self.assertGreaterEqual(result["eval_rouge1"] ,10 )
self.assertGreaterEqual(result["eval_rouge2"] ,2 )
self.assertGreaterEqual(result["eval_rougeL"] ,7 )
self.assertGreaterEqual(result["eval_rougeLsum"] ,7 )
self.assertTrue(os.path.exists(os.path.join(__lowerCAmelCase ,"epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(__lowerCAmelCase ,"summarization_no_trainer" ) ) )
@slow
@mock.patch.dict(os.environ ,{"WANDB_MODE": "offline"} )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : Dict = self.get_auto_remove_tmp_dir()
_lowerCamelCase : Union[str, Any] = F"""
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
_lowerCamelCase : Dict = get_results(__lowerCAmelCase )
self.assertGreaterEqual(result["eval_bleu"] ,30 )
self.assertTrue(os.path.exists(os.path.join(__lowerCAmelCase ,"epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(__lowerCAmelCase ,"translation_no_trainer" ) ) )
@slow
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : str = logging.StreamHandler(sys.stdout )
logger.addHandler(__lowerCAmelCase )
_lowerCamelCase : List[Any] = self.get_auto_remove_tmp_dir()
_lowerCamelCase : Optional[int] = F"""
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
""".split()
run_command(self._launch_args + testargs )
_lowerCamelCase : Any = get_results(__lowerCAmelCase )
self.assertGreaterEqual(result["eval_overall_accuracy"] ,0.10 )
@mock.patch.dict(os.environ ,{"WANDB_MODE": "offline"} )
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = self.get_auto_remove_tmp_dir()
_lowerCamelCase : Union[str, Any] = F"""
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
""".split()
if is_cuda_and_apex_available():
testargs.append("--fp16" )
run_command(self._launch_args + testargs )
_lowerCamelCase : Union[str, Any] = get_results(__lowerCAmelCase )
        # The base model scores about 25%
self.assertGreaterEqual(result["eval_accuracy"] ,0.6 )
self.assertTrue(os.path.exists(os.path.join(__lowerCAmelCase ,"step_1" ) ) )
self.assertTrue(os.path.exists(os.path.join(__lowerCAmelCase ,"image_classification_no_trainer" ) ) )
| 46 |
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def lowerCamelCase_( ) -> None:
'''simple docstring'''
print("Making key files..." )
make_key_files("rsa" , 1024 )
print("Key files generation successful." )
def lowerCamelCase_( _lowerCamelCase ) -> tuple[tuple[int, int], tuple[int, int]]:
'''simple docstring'''
print("Generating prime p..." )
_lowerCamelCase : List[str] = rabinMiller.generate_large_prime(_lowerCamelCase )
print("Generating prime q..." )
_lowerCamelCase : Tuple = rabinMiller.generate_large_prime(_lowerCamelCase )
_lowerCamelCase : Dict = p * q
print("Generating e that is relatively prime to (p - 1) * (q - 1)..." )
while True:
_lowerCamelCase : Tuple = random.randrange(2 ** (key_size - 1) , 2 ** (key_size) )
if cryptoMath.gcd(_lowerCamelCase , (p - 1) * (q - 1) ) == 1:
break
print("Calculating d that is mod inverse of e..." )
_lowerCamelCase : str = cryptoMath.find_mod_inverse(_lowerCamelCase , (p - 1) * (q - 1) )
_lowerCamelCase : Dict = (n, e)
_lowerCamelCase : Dict = (n, d)
return (public_key, private_key)
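# A hedged sketch (the helper name is illustrative) of a textbook RSA round trip
# with the key pair returned above, for an integer message m < n and no padding:
# encryption is c = m**e % n and decryption recovers m = c**d % n.
def _rsa_roundtrip_demo(public_key: tuple[int, int], private_key: tuple[int, int], message: int) -> bool:
    n, e = public_key
    _, d = private_key
    ciphertext = pow(message, e, n)  # encrypt
    return pow(ciphertext, d, n) == message  # decrypt and compare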
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> None:
'''simple docstring'''
if os.path.exists(F"""{name}_pubkey.txt""" ) or os.path.exists(F"""{name}_privkey.txt""" ):
print("\nWARNING:" )
print(
F"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
"Use a different name or delete these files and re-run this program." )
sys.exit()
    _lowerCamelCase, _lowerCamelCase = generate_key(_lowerCamelCase )
print(F"""\nWriting public key to file {name}_pubkey.txt...""" )
with open(F"""{name}_pubkey.txt""" , "w" ) as out_file:
out_file.write(F"""{key_size},{public_key[0]},{public_key[1]}""" )
print(F"""Writing private key to file {name}_privkey.txt...""" )
with open(F"""{name}_privkey.txt""" , "w" ) as out_file:
out_file.write(F"""{key_size},{private_key[0]},{private_key[1]}""" )
if __name__ == "__main__":
main()
| 46 | 1 |
"""simple docstring"""
from math import factorial
def lowerCamelCase_( _lowerCamelCase = 100 ) -> int:
'''simple docstring'''
return sum(map(_lowerCamelCase , str(factorial(_lowerCamelCase ) ) ) )
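# Worked example: for n = 10, 10! = 3_628_800 and 3+6+2+8+8+0+0 = 27; with the
# default n = 100 this is Project Euler problem 20, whose digit sum is 648.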
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
| 46 |
"""simple docstring"""
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class A_ :
def __init__( self: Dict ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: int=13 ,__lowerCAmelCase: List[str]=30 ,__lowerCAmelCase: List[str]=2 ,__lowerCAmelCase: Dict=3 ,__lowerCAmelCase: Tuple=True ,__lowerCAmelCase: List[str]=True ,__lowerCAmelCase: Optional[Any]=32 ,__lowerCAmelCase: List[Any]=5 ,__lowerCAmelCase: int=4 ,__lowerCAmelCase: Optional[int]=37 ,__lowerCAmelCase: Dict="gelu" ,__lowerCAmelCase: str=0.1 ,__lowerCAmelCase: List[str]=0.1 ,__lowerCAmelCase: Optional[Any]=10 ,__lowerCAmelCase: List[str]=0.02 ,__lowerCAmelCase: Union[str, Any]=3 ,__lowerCAmelCase: Tuple=0.6 ,__lowerCAmelCase: Dict=None ,):
'''simple docstring'''
_lowerCamelCase : Optional[int] = parent
_lowerCamelCase : Any = batch_size
_lowerCamelCase : Any = image_size
_lowerCamelCase : List[str] = patch_size
_lowerCamelCase : Union[str, Any] = num_channels
_lowerCamelCase : List[str] = is_training
_lowerCamelCase : str = use_labels
_lowerCamelCase : List[Any] = hidden_size
_lowerCamelCase : Union[str, Any] = num_hidden_layers
_lowerCamelCase : Optional[int] = num_attention_heads
_lowerCamelCase : Optional[Any] = intermediate_size
_lowerCamelCase : Optional[int] = hidden_act
_lowerCamelCase : Union[str, Any] = hidden_dropout_prob
_lowerCamelCase : Any = attention_probs_dropout_prob
_lowerCamelCase : str = type_sequence_label_size
_lowerCamelCase : int = initializer_range
_lowerCamelCase : Dict = mask_ratio
_lowerCamelCase : List[Any] = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
_lowerCamelCase : str = (image_size // patch_size) ** 2
_lowerCamelCase : Dict = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
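        # Worked example with the defaults above: image_size=30 and patch_size=2 give
        # num_patches = (30 // 2) ** 2 = 225, and mask_ratio=0.6 yields an expected
        # sequence length of ceil(0.4 * 226) = 91.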
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase : int = None
if self.use_labels:
_lowerCamelCase : Any = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_lowerCamelCase : str = self.get_config()
return config, pixel_values, labels
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
return ViTMAEConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=__lowerCAmelCase ,initializer_range=self.initializer_range ,mask_ratio=self.mask_ratio ,)
def _lowercase ( self: Any ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: Dict ):
'''simple docstring'''
_lowerCamelCase : Any = ViTMAEModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self: List[str] ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: int ,__lowerCAmelCase: Dict ):
'''simple docstring'''
_lowerCamelCase : List[Any] = ViTMAEForPreTraining(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Dict = model(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = (self.image_size // self.patch_size) ** 2
_lowerCamelCase : Optional[int] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
_lowerCamelCase : str = 1
_lowerCamelCase : Tuple = ViTMAEForPreTraining(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase )
_lowerCamelCase : Any = self.patch_size**2
self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : int = self.prepare_config_and_inputs()
        _lowerCamelCase, _lowerCamelCase, _lowerCamelCase = config_and_inputs
_lowerCamelCase : Dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class A_ ( _a , _a , unittest.TestCase ):
lowerCAmelCase__ = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
lowerCAmelCase__ = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : int = ViTMAEModelTester(self )
_lowerCamelCase : List[str] = ConfigTester(self ,config_class=__lowerCAmelCase ,has_text_modality=__lowerCAmelCase ,hidden_size=37 )
def _lowercase ( self: List[str] ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
pass
def _lowercase ( self: List[Any] ):
'''simple docstring'''
        _lowerCamelCase, _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Optional[Any] = model_class(__lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
_lowerCamelCase : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCAmelCase ,nn.Linear ) )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
        _lowerCamelCase, _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Dict = model_class(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : Optional[Any] = [*signature.parameters.keys()]
_lowerCamelCase : Dict = ["pixel_values"]
self.assertListEqual(arg_names[:1] ,__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__lowerCAmelCase )
def _lowercase ( self: Any ,__lowerCAmelCase: int ,__lowerCAmelCase: Dict ,__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
np.random.seed(2 )
_lowerCamelCase : Optional[Any] = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
_lowerCamelCase : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_lowerCamelCase : Union[str, Any] = torch.from_numpy(__lowerCAmelCase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
_lowerCamelCase : Dict = pt_noise
super().check_pt_tf_models(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
        _lowerCamelCase, _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : List[str] = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
_lowerCamelCase : int = model(**self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ) )
_lowerCamelCase : Any = outputs[0].cpu().numpy()
_lowerCamelCase : Union[str, Any] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__lowerCAmelCase )
_lowerCamelCase : str = model_class.from_pretrained(__lowerCAmelCase )
model.to(__lowerCAmelCase )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
_lowerCamelCase : Dict = model(**self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ) )
# Make sure we don't have nans
_lowerCamelCase : Union[str, Any] = after_outputs[0].cpu().numpy()
_lowerCamelCase : Union[str, Any] = 0
_lowerCamelCase : List[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__lowerCAmelCase ,1e-5 )
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def _lowercase ( self: str ):
'''simple docstring'''
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def _lowercase ( self: Tuple ):
'''simple docstring'''
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def _lowercase ( self: int ):
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _lowercase ( self: Dict ):
'''simple docstring'''
pass
@slow
def _lowercase ( self: Dict ):
'''simple docstring'''
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Optional[Any] = ViTMAEModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def lowerCamelCase_( ) -> str:
'''simple docstring'''
_lowerCamelCase : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class A_ ( unittest.TestCase ):
@cached_property
def _lowercase ( self: str ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
def _lowercase ( self: int ):
'''simple docstring'''
np.random.seed(2 )
_lowerCamelCase : List[str] = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" ).to(__lowerCAmelCase )
_lowerCamelCase : int = self.default_image_processor
_lowerCamelCase : int = prepare_img()
_lowerCamelCase : Tuple = image_processor(images=__lowerCAmelCase ,return_tensors="pt" ).to(__lowerCAmelCase )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
_lowerCamelCase : Tuple = ViTMAEConfig()
_lowerCamelCase : Union[str, Any] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
_lowerCamelCase : Optional[Any] = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
_lowerCamelCase : Dict = model(**__lowerCAmelCase ,noise=torch.from_numpy(__lowerCAmelCase ).to(device=__lowerCAmelCase ) )
# verify the logits
_lowerCamelCase : Any = torch.Size((1, 196, 768) )
self.assertEqual(outputs.logits.shape ,__lowerCAmelCase )
_lowerCamelCase : Tuple = torch.tensor(
[[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] ,expected_slice.to(__lowerCAmelCase ) ,atol=1e-4 ) )
| 46 | 1 |
"""simple docstring"""
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
_lowerCAmelCase : List[str] = '''0.12''' # assumed parallelism: 8
if is_torch_available():
import torch
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None ) -> Optional[int]:
'''simple docstring'''
if rng is None:
_lowerCamelCase : List[str] = random.Random()
_lowerCamelCase : str = 1
for dim in shape:
total_dims *= dim
_lowerCamelCase : Optional[Any] = []
for _ in range(_lowerCamelCase ):
values.append(rng.randint(0 , vocab_size - 1 ) )
_lowerCamelCase : str = np.array(_lowerCamelCase , dtype=jnp.intaa ).reshape(_lowerCamelCase )
return output
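# Usage sketch of the helper above (shape and vocab size are illustrative):
#
#   ids_tensor((2, 5), vocab_size=100) -> a (2, 5) integer array of token ids
#   drawn uniformly from [0, vocab_size - 1] with the supplied (or a fresh) RNG.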
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=None ) -> List[Any]:
'''simple docstring'''
_lowerCamelCase : List[Any] = ids_tensor(_lowerCamelCase , vocab_size=2 , rng=_lowerCamelCase )
# make sure that at least one token is attended to for each batch
_lowerCamelCase : Optional[int] = 1
return attn_mask
@require_flax
class A_ :
lowerCAmelCase__ = None
lowerCAmelCase__ = ()
def _lowercase ( self: Tuple ):
'''simple docstring'''
        _lowerCamelCase, _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
_lowerCamelCase : List[str] = 2
_lowerCamelCase : List[Any] = inputs["input_ids"].shape[-1] // 2
_lowerCamelCase : str = inputs["input_ids"][:max_batch_size, :sequence_length]
_lowerCamelCase : Optional[Any] = jnp.ones_like(__lowerCAmelCase )
_lowerCamelCase : List[str] = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
_lowerCamelCase : str = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
_lowerCamelCase : int = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
        _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase = self._get_input_ids_and_config()
_lowerCamelCase : Tuple = False
_lowerCamelCase : List[str] = max_length
_lowerCamelCase : Union[str, Any] = 0
for model_class in self.all_generative_model_classes:
_lowerCamelCase : str = model_class(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = model_class.__name__[4:] # Skip the "Flax" at the beginning
_lowerCamelCase : Dict = getattr(__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : int = pt_model_class(__lowerCAmelCase ).eval()
_lowerCamelCase : Optional[int] = load_flax_weights_in_pytorch_model(__lowerCAmelCase ,flax_model.params )
_lowerCamelCase : Optional[int] = flax_model.generate(__lowerCAmelCase ).sequences
_lowerCamelCase : Optional[Any] = pt_model.generate(torch.tensor(__lowerCAmelCase ,dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
_lowerCamelCase : Dict = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() ,flax_generation_outputs.tolist() )
def _lowercase ( self: List[str] ):
'''simple docstring'''
        _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase = self._get_input_ids_and_config()
_lowerCamelCase : Tuple = False
_lowerCamelCase : Any = max_length
for model_class in self.all_generative_model_classes:
_lowerCamelCase : Optional[int] = model_class(__lowerCAmelCase )
_lowerCamelCase : Any = model.generate(__lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase )
_lowerCamelCase : Optional[int] = jit(model.generate )
_lowerCamelCase : Any = jit_generate(__lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
        _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase = self._get_input_ids_and_config()
_lowerCamelCase : Optional[Any] = True
_lowerCamelCase : List[Any] = max_length
for model_class in self.all_generative_model_classes:
_lowerCamelCase : Optional[Any] = model_class(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = model.generate(__lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase )
_lowerCamelCase : Optional[int] = jit(model.generate )
_lowerCamelCase : Optional[Any] = jit_generate(__lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def _lowercase ( self: Any ):
'''simple docstring'''
        _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase = self._get_input_ids_and_config()
_lowerCamelCase : List[Any] = False
_lowerCamelCase : List[str] = max_length
_lowerCamelCase : List[Any] = 2
for model_class in self.all_generative_model_classes:
_lowerCamelCase : Union[str, Any] = model_class(__lowerCAmelCase )
_lowerCamelCase : List[str] = model.generate(__lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase )
_lowerCamelCase : List[str] = jit(model.generate )
_lowerCamelCase : List[str] = jit_generate(__lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
        _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase = self._get_input_ids_and_config()
_lowerCamelCase : Optional[int] = False
_lowerCamelCase : Union[str, Any] = max_length
_lowerCamelCase : Dict = 2
_lowerCamelCase : List[str] = 2
for model_class in self.all_generative_model_classes:
_lowerCamelCase : int = model_class(__lowerCAmelCase )
_lowerCamelCase : Any = model.generate(__lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[0] ,input_ids.shape[0] * config.num_return_sequences )
def _lowercase ( self: Any ):
'''simple docstring'''
        _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase = self._get_input_ids_and_config()
_lowerCamelCase : Optional[Any] = True
_lowerCamelCase : Tuple = max_length
_lowerCamelCase : Optional[int] = 0.8
_lowerCamelCase : List[str] = 10
_lowerCamelCase : Tuple = 0.3
_lowerCamelCase : List[str] = 1
_lowerCamelCase : Any = 8
_lowerCamelCase : List[str] = 9
for model_class in self.all_generative_model_classes:
_lowerCamelCase : Union[str, Any] = model_class(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = model.generate(__lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase )
_lowerCamelCase : Optional[int] = jit(model.generate )
_lowerCamelCase : Optional[int] = jit_generate(__lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
        _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase = self._get_input_ids_and_config()
_lowerCamelCase : Union[str, Any] = max_length
_lowerCamelCase : Optional[Any] = 1
_lowerCamelCase : Any = 8
_lowerCamelCase : List[Any] = 9
for model_class in self.all_generative_model_classes:
_lowerCamelCase : Union[str, Any] = model_class(__lowerCAmelCase )
_lowerCamelCase : Any = model.generate(__lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase )
_lowerCamelCase : List[Any] = jit(model.generate )
_lowerCamelCase : Tuple = jit_generate(__lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def _lowercase ( self: Tuple ):
'''simple docstring'''
        _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase = self._get_input_ids_and_config()
_lowerCamelCase : Dict = max_length
_lowerCamelCase : List[Any] = 2
_lowerCamelCase : Optional[Any] = 1
_lowerCamelCase : Optional[Any] = 8
_lowerCamelCase : Dict = 9
for model_class in self.all_generative_model_classes:
_lowerCamelCase : List[str] = model_class(__lowerCAmelCase )
_lowerCamelCase : Dict = model.generate(__lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase )
_lowerCamelCase : int = jit(model.generate )
_lowerCamelCase : int = jit_generate(__lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
        _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase = self._get_input_ids_and_config()
# pad attention mask on the left
_lowerCamelCase : List[str] = attention_mask.at[(0, 0)].set(0 )
_lowerCamelCase : Dict = False
_lowerCamelCase : str = max_length
for model_class in self.all_generative_model_classes:
_lowerCamelCase : List[str] = model_class(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = model.generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase )
_lowerCamelCase : List[str] = jit(model.generate )
_lowerCamelCase : Dict = jit_generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def _lowercase ( self: str ):
'''simple docstring'''
        _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase = self._get_input_ids_and_config()
# pad attention mask on the left
_lowerCamelCase : Optional[Any] = attention_mask.at[(0, 0)].set(0 )
_lowerCamelCase : int = True
_lowerCamelCase : Tuple = max_length
for model_class in self.all_generative_model_classes:
_lowerCamelCase : Optional[int] = model_class(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = model.generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase )
_lowerCamelCase : Dict = jit(model.generate )
_lowerCamelCase : int = jit_generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def _lowercase ( self: Dict ):
'''simple docstring'''
        _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase = self._get_input_ids_and_config()
# pad attention mask on the left
_lowerCamelCase : List[str] = attention_mask.at[(0, 0)].set(0 )
_lowerCamelCase : Tuple = 2
_lowerCamelCase : List[str] = max_length
for model_class in self.all_generative_model_classes:
_lowerCamelCase : List[Any] = model_class(__lowerCAmelCase )
_lowerCamelCase : List[Any] = model.generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase )
_lowerCamelCase : Any = jit(model.generate )
_lowerCamelCase : int = jit_generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
@require_flax
class A_ ( unittest.TestCase ):
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : str = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert" )
_lowerCamelCase : Optional[Any] = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only" )
_lowerCamelCase : str = "Hello world"
_lowerCamelCase : List[Any] = tokenizer(__lowerCAmelCase ,return_tensors="np" ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(__lowerCAmelCase ,"do_samples" ):
model.generate(__lowerCAmelCase ,do_samples=__lowerCAmelCase )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(__lowerCAmelCase ,"foo" ):
_lowerCamelCase : str = {"foo": "bar"}
model.generate(__lowerCAmelCase ,**__lowerCAmelCase )
| 46 |
"""simple docstring"""
from __future__ import annotations
# Precision threshold for the searches below: once a window shrinks under this
# size, the ternary searches fall back to linear search. It can be altered, but
# keeping it greater than or equal to 10 is recommended.
_lowerCAmelCase : List[str] = 10
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int:
'''simple docstring'''
for i in range(_lowerCamelCase , _lowerCamelCase ):
if array[i] == target:
return i
return -1
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> int:
'''simple docstring'''
_lowerCamelCase : List[str] = 0
_lowerCamelCase : Any = len(_lowerCamelCase )
while left <= right:
if right - left < precision:
return lin_search(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
_lowerCamelCase : str = (left + right) // 3 + 1
_lowerCamelCase : List[str] = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
_lowerCamelCase : Union[str, Any] = one_third - 1
elif array[two_third] < target:
_lowerCamelCase : Any = two_third + 1
else:
_lowerCamelCase : List[str] = one_third + 1
_lowerCamelCase : int = two_third - 1
else:
return -1
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int:
'''simple docstring'''
if left < right:
if right - left < precision:
return lin_search(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
_lowerCamelCase : Tuple = (left + right) // 3 + 1
_lowerCamelCase : Optional[Any] = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
return rec_ternary_search(_lowerCamelCase , one_third - 1 , _lowerCamelCase , _lowerCamelCase )
elif array[two_third] < target:
return rec_ternary_search(two_third + 1 , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
else:
return rec_ternary_search(one_third + 1 , two_third - 1 , _lowerCamelCase , _lowerCamelCase )
else:
return -1
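# Worked trace (numbers illustrative): searching for 6 in [1, 2, ..., 10] first
# probes one_third=4 (value 5) and two_third=7 (value 8); since 5 < 6 < 8 the
# window narrows to indices 5..6, whose width is below `precision`, so
# lin_search takes over and returns index 5. Note a boundary caveat: lin_search
# treats `right` as exclusive while the ternary steps treat it as inclusive, so
# a target sitting exactly at a narrowed right boundary (e.g. 7 at index 6 in
# this list) can be missed.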
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCAmelCase : Optional[Any] = input('''Enter numbers separated by comma:\n''').strip()
_lowerCAmelCase : Optional[Any] = [int(item.strip()) for item in user_input.split(''',''')]
assert collection == sorted(collection), f"List must be ordered.\n{collection}."
_lowerCAmelCase : Any = int(input('''Enter the number to be found in the list:\n''').strip())
_lowerCAmelCase : Union[str, Any] = ite_ternary_search(collection, target)
_lowerCAmelCase : str = rec_ternary_search(0, len(collection) - 1, collection, target)
if resulta != -1:
print(f'''Iterative search: {target} found at positions: {resulta}''')
print(f'''Recursive search: {target} found at positions: {resulta}''')
else:
print('''Not found''')
| 46 | 1 |
"""simple docstring"""
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=() , _lowerCamelCase=None , _lowerCamelCase="no" , _lowerCamelCase="29500" ) -> str:
'''simple docstring'''
_lowerCamelCase : Optional[Any] = False
_lowerCamelCase : Dict = False
if any(key.startswith("KAGGLE" ) for key in os.environ.keys() ):
_lowerCamelCase : Optional[int] = True
elif "IPython" in sys.modules:
_lowerCamelCase : Tuple = "google.colab" in str(sys.modules["IPython"].get_ipython() )
try:
_lowerCamelCase : Optional[int] = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
F"""Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.""" )
if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME" , _lowerCamelCase ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
"your training function. Restart your notebook and make sure no cells initializes an "
"`Accelerator`." )
if num_processes is None:
_lowerCamelCase : List[str] = 8
_lowerCamelCase : List[str] = PrepareForLaunch(_lowerCamelCase , distributed_type="TPU" )
print(F"""Launching a training on {num_processes} TPU cores.""" )
xmp.spawn(_lowerCamelCase , args=_lowerCamelCase , nprocs=_lowerCamelCase , start_method="fork" )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print("Launching training on one GPU." )
else:
print("Launching training on one CPU." )
function(*_lowerCamelCase )
else:
if num_processes is None:
raise ValueError(
"You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call." )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
"inside your training function. Restart your notebook and make sure no cells initializes an "
"`Accelerator`." )
if torch.cuda.is_initialized():
raise ValueError(
"To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
"using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
"function." )
            # torch.distributed will expect a few environment variables to be here. We set the ones common to each
            # process here (the other ones will be set by the launcher).
with patch_environment(
world_size=_lowerCamelCase , master_addr="127.0.01" , master_port=_lowerCamelCase , mixed_precision=_lowerCamelCase ):
_lowerCamelCase : Optional[Any] = PrepareForLaunch(_lowerCamelCase , distributed_type="MULTI_GPU" )
print(F"""Launching training on {num_processes} GPUs.""" )
try:
start_processes(_lowerCamelCase , args=_lowerCamelCase , nprocs=_lowerCamelCase , start_method="fork" )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
"CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
"This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
"Please review your imports and test them when running the `notebook_launcher()` to identify "
"which one is problematic." ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
_lowerCamelCase : Union[str, Any] = "1"
print("Launching training on MPS." )
elif torch.cuda.is_available():
print("Launching training on one GPU." )
else:
print("Launching training on CPU." )
function(*_lowerCamelCase )
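# Usage sketch (hedged: in un-obfuscated Accelerate this entry point is the
# public `notebook_launcher`; the training function and process count below are
# illustrative):
#
#   def training_loop(mixed_precision="fp16"):
#       ...  # build the Accelerator and train here
#
#   notebook_launcher(training_loop, args=("fp16",), num_processes=2)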
def debug_launcher(function, args=(), num_processes=2):
    """Launch a training function on CPU with several processes, for debugging purposes."""
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variables to be here. We set the ones common to each
        # process here (the other ones will be set by the launcher).
        with patch_environment(
            world_size=num_processes,
            master_addr="127.0.0.1",
            master_port="29500",
            accelerate_mixed_precision="no",
            accelerate_debug_rdv_file=tmp_file.name,
            accelerate_use_cpu="yes",
        ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
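
# A minimal usage sketch (illustrative, not part of the original module): from a notebook, the
# launcher takes a training function plus its arguments and spawns it on the available hardware.
# The training function below is a hypothetical placeholder; an `Accelerator` must only be
# created *inside* the launched function.
#
#   def training_loop(learning_rate):
#       accelerator = Accelerator()
#       accelerator.print(f"Training with lr={learning_rate} on {accelerator.num_processes} processes.")
#
#   notebook_launcher(training_loop, args=(1e-4,), num_processes=2)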
| 46 |
"""simple docstring"""
def solution(n: int = 100) -> int:
    """Count the distinct terms generated by a**b for 2 <= a <= n and 2 <= b <= n."""
    collect_powers = set()

    n = n + 1  # maximum limit

    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)
if __name__ == "__main__":
print('''Number of terms ''', solution(int(str(input()).strip())))
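    # Sanity check (from the Project Euler 29 statement): for n = 5 there are 15 distinct
    # terms, since 2**4 == 4**2 collapses to a single value.
    assert solution(5) == 15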
| 46 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING

from ...utils import _LazyModule

_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
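
# Usage sketch (illustrative): thanks to `_LazyModule`, the tokenizer class is only imported
# the first time it is accessed, e.g.
#
#   from transformers.models.wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
#
#   tokenizer = Wav2Vec2PhonemeCTCTokenizer.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
#
# (the checkpoint name above is just an example)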
| 46 |
"""simple docstring"""
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
    # See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}."
            )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}."
            )

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")
        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should be lower than 0.4, got {self.dropout}.")

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)
def get_default_vocab_list():
'''simple docstring'''
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
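
# Usage sketch (illustrative): the nested configs validate their dimensions when constructed,
# so inconsistent settings fail fast, e.g.
#
#   config = EsmConfig(vocab_size=33, is_folding_model=True, esmfold_config={"trunk": {"num_blocks": 4}})
#   config.esmfold_config.trunk.num_blocks  # -> 4
#   TrunkConfig(sequence_state_dim=1024, sequence_head_width=100)  # raises ValueError: 1024 % 100 != 0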
| 46 | 1 |
"""simple docstring"""
class EditDistance:
    """
    Dynamic-programming solver for the edit (Levenshtein) distance between two words,
    implemented both top-down (with memoization) and bottom-up (tabulation).
    """

    def __init__(self):
        self.word1 = ""
        self.word2 = ""
        self.dp = []

    def __min_dist_top_down_dp(self, m: int, n: int) -> int:
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)

            return self.dp[m][n]

    def min_dist_top_down(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]

        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)

    def min_dist_bottom_up(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]

        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif self.word1[i - 1] == self.word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]
if __name__ == "__main__":
    solver = EditDistance()
print('''****************** Testing Edit Distance DP Algorithm ******************''')
print()
    S1 = input('''Enter the first string: ''').strip()
    S2 = input('''Enter the second string: ''').strip()
print()
    print(f'''The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}''')
    print(f'''The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}''')
print()
print('''*************** End of Testing Edit Distance DP Algorithm ***************''')
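
    # Sanity check: the classic pair "kitten" -> "sitting" has edit distance 3
    # (substitute k->s, substitute e->i, append g).
    assert EditDistance().min_dist_bottom_up("kitten", "sitting") == 3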
| 46 |
"""simple docstring"""
import re
def dna_complement(dna: str) -> str:
    """
    Returns the complementary strand of the given DNA sequence.

    >>> dna_complement("ATCG")
    'TAGC'
    >>> dna_complement("GTAT")
    'CATA'
    """
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")

    return dna.translate(dna.maketrans("ATCG", "TAGC"))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 46 | 1 |
"""simple docstring"""
def decimal_to_binary(num: int) -> str:
    """
    Convert an integer to its binary string representation.

    >>> decimal_to_binary(0)
    '0b0'
    >>> decimal_to_binary(10)
    '0b1010'
    >>> decimal_to_binary(-10)
    '-0b1010'
    """
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")

    if num == 0:
        return "0b0"

    negative = False

    if num < 0:
        negative = True
        num = -num

    binary: list[int] = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1

    if negative:
        return "-0b" + "".join(str(e) for e in binary)

    return "0b" + "".join(str(e) for e in binary)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 46 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : str = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    """Create the list of (old_name, new_name) pairs mapping DINO weights to ViT weights."""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """Split each fused qkv projection into separate query, key and value weights."""
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    """Remove the classification head weights from the state dict (in place)."""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    """Rename a key in the given dict (in place)."""
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """Download an image of two cats on which we will verify the conversion."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    """Copy/paste/tweak the original DINO weights into our ViT structure."""
    # define default ViT configuration
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6

    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_lowerCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''dino_vitb16''',
type=str,
help='''Name of the model trained with DINO you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--base_model''',
action='''store_true''',
help='''Whether to only convert the base model (no projection head weights).''',
)
parser.set_defaults(base_model=True)
_lowerCAmelCase : List[Any] = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
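    # Example invocation (illustrative; the script file name is an assumption):
    #
    #   python convert_dino_to_vit.py --model_name dino_vits16 --pytorch_dump_folder_path ./dino_vits16
    #
    # or, from Python, once this module is imported:
    #
    #   convert_vit_checkpoint("dino_vits16", "./dino_vits16", base_model=True)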
| 46 | 1 |
"""simple docstring"""
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # suppress TensorFlow INFO/WARNING/ERROR logs
print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
print('''Torch version:''', None)
try:
import transformers
print('''transformers version:''', transformers.__version__)
except ImportError:
print('''transformers version:''', None)
| 46 |
"""simple docstring"""
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
class ClassificationFunction(ExplicitEnum):
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"
@add_end_docstrings(
    PIPELINE_INIT_ARGS , r'\n        return_all_scores (`bool`, *optional*, defaults to `False`):\n            Whether to return all prediction scores or just the one of the predicted class.\n        function_to_apply (`str`, *optional*, defaults to `"default"`):\n            The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:\n\n            - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model\n              has several labels, will apply the softmax function on the output.\n            - `"sigmoid"`: Applies the sigmoid function on the output.\n            - `"softmax"`: Applies the softmax function on the output.\n            - `"none"`: Does not apply any function on the output.\n    ' , )
class TextClassificationPipeline(Pipeline):
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
        )
    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        preprocess_params = tokenizer_kwargs

        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores

        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
                UserWarning,
            )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1

        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]

        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params
    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result
    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs
            )
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.'
            )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)
    def _forward(self, model_inputs):
        return self.model(**model_inputs)
    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE

        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()

        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}")

        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}

        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
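
# Usage sketch (illustrative): this pipeline is normally built through the `pipeline` factory;
# `top_k=None` returns the scores for every label instead of just the top one.
#
#   from transformers import pipeline
#
#   classifier = pipeline("text-classification")
#   classifier("This movie was great!")              # e.g. [{'label': ..., 'score': ...}]
#   classifier("This movie was great!", top_k=None)  # scores for all labels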
| 46 | 1 |
"""simple docstring"""
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

SPECIAL_CASES_TO_ALLOW = {
# used to compute the property `self.chunk_length`
'''EncodecConfig''': ['''overlap'''],
# used as `self.bert_model = BertModel(config, ...)`
'''DPRConfig''': True,
# not used in modeling files, but it's an important information
'''FSMTConfig''': ['''langs'''],
# used internally in the configuration class file
'''GPTNeoConfig''': ['''attention_types'''],
# used internally in the configuration class file
'''EsmConfig''': ['''is_folding_model'''],
# used during training (despite we don't have training script for these models yet)
'''Mask2FormerConfig''': ['''ignore_value'''],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
'''OneFormerConfig''': ['''ignore_value''', '''norm'''],
# used during preprocessing and collation, see `collating_graphormer.py`
'''GraphormerConfig''': ['''spatial_pos_max'''],
# used internally in the configuration class file
'''T5Config''': ['''feed_forward_proj'''],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
'''MT5Config''': ['''feed_forward_proj''', '''tokenizer_class'''],
'''UMT5Config''': ['''feed_forward_proj''', '''tokenizer_class'''],
# used internally in the configuration class file
'''LongT5Config''': ['''feed_forward_proj'''],
# used internally in the configuration class file
'''SwitchTransformersConfig''': ['''feed_forward_proj'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''BioGptConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''GLPNConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''SegformerConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''CvtConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''PerceiverConfig''': ['''layer_norm_eps'''],
# used internally to calculate the feature size
'''InformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate the feature size
'''TimeSeriesTransformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate the feature size
'''AutoformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate `mlp_dim`
'''SamVisionConfig''': ['''mlp_ratio'''],
# For (head) training, but so far not implemented
'''ClapAudioConfig''': ['''num_classes'''],
# Not used, but providing useful information to users
'''SpeechT5HifiGanConfig''': ['''sampling_rate'''],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
'''CLIPSegConfig''': True,
'''DeformableDetrConfig''': True,
'''DetaConfig''': True,
'''DinatConfig''': True,
'''DonutSwinConfig''': True,
'''EfficientFormerConfig''': True,
'''FSMTConfig''': True,
'''JukeboxConfig''': True,
'''LayoutLMv2Config''': True,
'''MaskFormerSwinConfig''': True,
'''MT5Config''': True,
'''NatConfig''': True,
'''OneFormerConfig''': True,
'''PerceiverConfig''': True,
'''RagConfig''': True,
'''SpeechT5Config''': True,
'''SwinConfig''': True,
'''Swin2SRConfig''': True,
'''Swinv2Config''': True,
'''SwitchTransformersConfig''': True,
'''TableTransformerConfig''': True,
'''TapasConfig''': True,
'''TransfoXLConfig''': True,
'''UniSpeechConfig''': True,
'''UniSpeechSatConfig''': True,
'''WavLMConfig''': True,
'''WhisperConfig''': True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
'''JukeboxPriorConfig''': True,
# TODO: @Younes (for `is_decoder`)
'''Pix2StructTextConfig''': True,
}
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    """Check if any name in `attributes` is used in one of the model's modeling source files."""
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"config.{attribute}" in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break

    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]

    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True
            # configuration class specific cases
            if not case_allowed:
                allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
                case_allowed = allowed_cases is True or attribute in allowed_cases

    return attribute_used or case_allowed
def check_config_attributes_being_used(config_class):
    """Return the list of `config_class` init arguments that are never used in the modeling files."""
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]

    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}

    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]

    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())

    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])

        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])

    return sorted(unused_attributes)
def check_config_attributes():
    """Check that the arguments in `__init__` of all configuration classes are used in the modeling files."""
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes

    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"
        raise ValueError(error)
if __name__ == "__main__":
check_config_attributes()
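    # Mini demonstration (illustrative) of the multi-line `getattr` pattern matched above:
    #
    #   source = 'getattr(self.config,\n    "hidden_size", 768)'
    #   re.search(r'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"hidden_size"', source)
    #
    # matches, because the whitespace character class includes newlines.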
| 46 |
"""simple docstring"""
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = """\
Text data.
Second line of data."""

FILE_PATH = "file"


@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH


@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)

    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected


def test_cached_path_local(text_file):
    # absolute path
    text_file_abs = str(Path(text_file).resolve())
    assert cached_path(text_file_abs) == text_file
    # relative path
    text_file_rel = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file_rel) == text_file


def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)


def test_get_from_cache_fsspec(tmpfs_file):
    output_path = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_path) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
| 46 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
def __init__( self: Dict ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: Tuple=3 ,__lowerCAmelCase: Union[str, Any]=32 ,__lowerCAmelCase: Optional[Any]=3 ,__lowerCAmelCase: str=10 ,__lowerCAmelCase: Optional[int]=[8, 16, 32, 64] ,__lowerCAmelCase: Dict=[1, 1, 2, 1] ,__lowerCAmelCase: List[Any]=True ,__lowerCAmelCase: Union[str, Any]=True ,__lowerCAmelCase: int="relu" ,__lowerCAmelCase: Any=3 ,__lowerCAmelCase: Optional[Any]=None ,__lowerCAmelCase: Dict=["stage2", "stage3", "stage4"] ,__lowerCAmelCase: List[Any]=[2, 3, 4] ,__lowerCAmelCase: Any=1 ,):
'''simple docstring'''
_lowerCamelCase : Dict = parent
_lowerCamelCase : str = batch_size
_lowerCamelCase : Any = image_size
_lowerCamelCase : Dict = num_channels
_lowerCamelCase : Any = embeddings_size
_lowerCamelCase : str = hidden_sizes
_lowerCamelCase : Optional[int] = depths
_lowerCamelCase : Tuple = is_training
_lowerCamelCase : Any = use_labels
_lowerCamelCase : Dict = hidden_act
_lowerCamelCase : Any = num_labels
_lowerCamelCase : Dict = scope
_lowerCamelCase : List[Any] = len(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = out_features
_lowerCamelCase : Any = out_indices
_lowerCamelCase : List[str] = num_groups
def _lowercase ( self: int ):
'''simple docstring'''
_lowerCamelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase : int = None
if self.use_labels:
_lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] ,self.num_labels )
_lowerCamelCase : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
return BitConfig(
num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,out_features=self.out_features ,out_indices=self.out_indices ,num_groups=self.num_groups ,)
def _lowercase ( self: Optional[int] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: Any ):
'''simple docstring'''
_lowerCamelCase : List[Any] = BitModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Optional[int] = model(__lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
def _lowercase ( self: List[Any] ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: Dict ,__lowerCAmelCase: Dict ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = self.num_labels
_lowerCamelCase : List[str] = BitForImageClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Tuple = model(__lowerCAmelCase ,labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _lowercase ( self: Any ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = BitBackbone(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : str = model(__lowerCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) )
self.parent.assertListEqual(model.channels ,config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_lowerCamelCase : List[str] = None
_lowerCamelCase : str = BitBackbone(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : int = model(__lowerCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,1 )
self.parent.assertListEqual(model.channels ,[config.hidden_sizes[-1]] )
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : Dict = self.prepare_config_and_inputs()
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : List[str] = config_and_inputs
_lowerCamelCase : Optional[Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : Dict = BitModelTester(self )
_lowerCamelCase : List[str] = ConfigTester(self ,config_class=__lowerCAmelCase ,has_text_modality=__lowerCAmelCase )
def _lowercase ( self: str ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowercase ( self: List[str] ):
'''simple docstring'''
return
@unittest.skip(reason="Bit does not output attentions" )
def _lowercase ( self: str ):
'''simple docstring'''
pass
@unittest.skip(reason="Bit does not use inputs_embeds" )
def _lowercase ( self: List[str] ):
'''simple docstring'''
pass
@unittest.skip(reason="Bit does not support input and output embeddings" )
def _lowercase ( self: str ):
'''simple docstring'''
pass
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Tuple = model_class(__lowerCAmelCase )
_lowerCamelCase : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : str = [*signature.parameters.keys()]
_lowerCamelCase : Union[str, Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] ,__lowerCAmelCase )
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__lowerCAmelCase )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Any = model_class(config=__lowerCAmelCase )
for name, module in model.named_modules():
if isinstance(__lowerCAmelCase ,(nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) ,msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" ,)
self.assertTrue(
torch.all(module.bias == 0 ) ,msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" ,)
def _lowercase ( self: str ):
'''simple docstring'''
def check_hidden_states_output(__lowerCAmelCase: Any ,__lowerCAmelCase: Dict ,__lowerCAmelCase: Dict ):
_lowerCamelCase : str = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
with torch.no_grad():
_lowerCamelCase : str = model(**self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ) )
_lowerCamelCase : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_lowerCamelCase : int = self.model_tester.num_stages
self.assertEqual(len(__lowerCAmelCase ) ,expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
_lowerCamelCase, _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Any = ["preactivation", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
_lowerCamelCase : int = layer_type
_lowerCamelCase : Tuple = True
check_hidden_states_output(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCamelCase : Optional[int] = True
check_hidden_states_output(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
@unittest.skip(reason="Bit does not use feedforward chunking" )
def _lowercase ( self: str ):
'''simple docstring'''
pass
def _lowercase ( self: int ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase )
@slow
def _lowercase ( self: str ):
'''simple docstring'''
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Any = BitModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def lowerCamelCase_( ) -> Tuple:
'''simple docstring'''
_lowerCamelCase : str = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
@cached_property
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : Any = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__lowerCAmelCase )
_lowerCamelCase : List[str] = self.default_image_processor
_lowerCamelCase : Dict = prepare_img()
_lowerCamelCase : Union[str, Any] = image_processor(images=__lowerCAmelCase ,return_tensors="pt" ).to(__lowerCAmelCase )
# forward pass
with torch.no_grad():
_lowerCamelCase : int = model(**__lowerCAmelCase )
# verify the logits
_lowerCamelCase : List[Any] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape ,__lowerCAmelCase )
_lowerCamelCase : str = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,__lowerCAmelCase ,atol=1e-4 ) )
@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig

    has_attentions = False
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : Dict = BitModelTester(self )
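
# Usage sketch (illustrative): running inference with a pretrained BiT checkpoint, mirroring
# the integration test above (the checkpoint name is just an example):
#
#   from transformers import BitImageProcessor, BitForImageClassification
#
#   processor = BitImageProcessor.from_pretrained("google/bit-50")
#   model = BitForImageClassification.from_pretrained("google/bit-50")
#   inputs = processor(images=image, return_tensors="pt")
#   logits = model(**inputs).logits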
| 46 |
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase = "cpu" , _lowerCamelCase = None ) -> None:
'''simple docstring'''
_lowerCamelCase : Any = torch.load(_lowerCamelCase , map_location=_lowerCamelCase )
for k, v in tqdm(state_dict.items() ):
if not isinstance(_lowerCamelCase , torch.Tensor ):
raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin" )
_lowerCamelCase : List[str] = v.half()
if save_path is None: # overwrite src_path
_lowerCamelCase : Union[str, Any] = src_path
torch.save(_lowerCamelCase , _lowerCamelCase )
if __name__ == "__main__":
fire.Fire(convert)
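    # Example invocation (illustrative; the script file name is an assumption):
    #
    #   python convert_model_to_fp16.py pytorch_model.bin
    #
    # or, from Python:
    #
    #   convert("pytorch_model.bin", save_path="pytorch_model_fp16.bin")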
| 46 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor

logger = logging.get_logger(__name__)


class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
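
# Migration sketch (illustrative): new code should construct the image processor directly,
# which accepts the same arguments (the checkpoint name is just an example):
#
#   from transformers import LayoutLMv2ImageProcessor
#
#   image_processor = LayoutLMv2ImageProcessor.from_pretrained("microsoft/layoutlmv2-base-uncased")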
| 46 |
"""simple docstring"""
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
_lowerCAmelCase : List[str] = get_tests_dir('''fixtures/dummy-config.json''')
class A_ ( unittest.TestCase ):
def _lowercase ( self: int ):
'''simple docstring'''
_lowerCamelCase : List[Any] = 0
def _lowercase ( self: Dict ):
'''simple docstring'''
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto" ) )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : Dict = AutoConfig.from_pretrained("bert-base-uncased" )
self.assertIsInstance(__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
        _lowerCamelCase : Optional[Any] = AutoConfig.from_pretrained(_lowerCAmelCase )
        self.assertIsInstance(_lowerCamelCase ,RobertaConfig )
def _lowercase ( self: Any ):
'''simple docstring'''
        _lowerCamelCase : List[str] = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER )
        self.assertIsInstance(_lowerCamelCase ,RobertaConfig )
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = AutoConfig.for_model("roberta" )
self.assertIsInstance(__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
            folder = os.path.join(tmp_dir ,"fake-roberta" )
            os.makedirs(folder ,exist_ok=True )
            with open(os.path.join(folder ,"config.json" ) ,"w" ) as f:
                f.write(json.dumps({} ) )
            config = AutoConfig.from_pretrained(folder )
            self.assertEqual(type(config ) ,RobertaConfig )
def _lowercase ( self: Dict ):
'''simple docstring'''
try:
AutoConfig.register("custom" ,__lowerCAmelCase )
# Wrong model type will raise an error
with self.assertRaises(__lowerCAmelCase ):
AutoConfig.register("model" ,__lowerCAmelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__lowerCAmelCase ):
AutoConfig.register("bert" ,__lowerCAmelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
_lowerCamelCase : Any = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__lowerCAmelCase )
_lowerCamelCase : List[str] = AutoConfig.from_pretrained(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase ,__lowerCAmelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def _lowercase ( self: Dict ):
'''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError ,"bert-base is not a local folder and is not a valid model identifier" ):
_lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained("bert-base" )
def _lowercase ( self: Dict ):
'''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError ,r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
            _lowerCamelCase : str = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER ,revision="aaaaaa" )
def _lowercase ( self: Tuple ):
'''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError ,"hf-internal-testing/no-config-test-repo does not appear to have a file named config.json." ,):
_lowerCamelCase : List[str] = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo" )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
        with self.assertRaises(ValueError ):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError ):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" ,trust_remote_code=False )
        config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" ,trust_remote_code=True )
        self.assertEqual(config.__class__.__name__ ,"NewModelConfig" )
        # Test config can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir )
            reloaded_config = AutoConfig.from_pretrained(tmp_dir ,trust_remote_code=True )
self.assertEqual(reloaded_config.__class__.__name__ ,"NewModelConfig" )
def _lowercase ( self: Dict ):
'''simple docstring'''
        class NewModelConfigLocal( BertConfig ):
            model_type = 'new-model'
        try:
            AutoConfig.register("new-model" ,NewModelConfigLocal )
            # If remote code is not set, the default is to use local
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" )
            self.assertEqual(config.__class__.__name__ ,"NewModelConfigLocal" )
            # If remote code is disabled, we load the local one.
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" ,trust_remote_code=False )
            self.assertEqual(config.__class__.__name__ ,"NewModelConfigLocal" )
            # If remote is enabled, we load from the Hub
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" ,trust_remote_code=True )
            self.assertEqual(config.__class__.__name__ ,"NewModelConfig" )
self.assertEqual(config.__class__.__name__ ,"NewModelConfig" )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
| 46 | 1 |
"""simple docstring"""
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 46 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_gpt_sw3'''] = ['''GPTSw3Tokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
_lowerCAmelCase : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
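# With this setup importing the package is cheap: the sentencepiece-backed tokenizer
# module is only loaded on first attribute access. A usage sketch (assuming this file
# is the gpt_sw3 package's __init__.py inside transformers):
#
#     from transformers.models.gpt_sw3 import GPTSw3Tokenizer  # triggers the lazy import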
| 46 | 1 |
"""simple docstring"""
def check_bouncy( n: int ) -> bool:
    '''simple docstring'''
    if not isinstance(n , int ):
        raise ValueError("check_bouncy() accepts only integer arguments" )
    str_n = str(n )
    sorted_str_n = "".join(sorted(str_n ) )
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
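# For example, 409 is bouncy: its digits are neither increasing (ascending sort gives
# "049") nor decreasing (descending sort gives "940"). 1234 (increasing) and 4410
# (decreasing) are not bouncy.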
def solution( percent: float = 99 ) -> int:
    '''simple docstring'''
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100" )
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num ):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f'''{solution(99)}''')
| 46 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : Tuple = logging.get_logger(__name__)
def create_rename_keys( config , vqa_model=False , nlvr_model=False , irtr_model=False ) -> list:
    '''simple docstring'''
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""transformer.blocks.{i}.norm1.weight""", F"""vilt.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm1.bias""", F"""vilt.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.weight""", F"""vilt.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.bias""", F"""vilt.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.weight""", F"""vilt.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.bias""", F"""vilt.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.mlp.fc1.weight""", F"""vilt.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc1.bias""", F"""vilt.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.weight""", F"""vilt.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.bias""", F"""vilt.encoder.layer.{i}.output.dense.bias""") )
# embeddings
rename_keys.extend(
[
# text embeddings
("text_embeddings.word_embeddings.weight", "vilt.embeddings.text_embeddings.word_embeddings.weight"),
(
"text_embeddings.position_embeddings.weight",
"vilt.embeddings.text_embeddings.position_embeddings.weight",
),
("text_embeddings.position_ids", "vilt.embeddings.text_embeddings.position_ids"),
(
"text_embeddings.token_type_embeddings.weight",
"vilt.embeddings.text_embeddings.token_type_embeddings.weight",
),
("text_embeddings.LayerNorm.weight", "vilt.embeddings.text_embeddings.LayerNorm.weight"),
("text_embeddings.LayerNorm.bias", "vilt.embeddings.text_embeddings.LayerNorm.bias"),
# patch embeddings
("transformer.cls_token", "vilt.embeddings.cls_token"),
("transformer.patch_embed.proj.weight", "vilt.embeddings.patch_embeddings.projection.weight"),
("transformer.patch_embed.proj.bias", "vilt.embeddings.patch_embeddings.projection.bias"),
("transformer.pos_embed", "vilt.embeddings.position_embeddings"),
# token type embeddings
("token_type_embeddings.weight", "vilt.embeddings.token_type_embeddings.weight"),
] )
# final layernorm + pooler
rename_keys.extend(
[
("transformer.norm.weight", "vilt.layernorm.weight"),
("transformer.norm.bias", "vilt.layernorm.bias"),
("pooler.dense.weight", "vilt.pooler.dense.weight"),
("pooler.dense.bias", "vilt.pooler.dense.bias"),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
("vqa_classifier.0.weight", "classifier.0.weight"),
("vqa_classifier.0.bias", "classifier.0.bias"),
("vqa_classifier.1.weight", "classifier.1.weight"),
("vqa_classifier.1.bias", "classifier.1.bias"),
("vqa_classifier.3.weight", "classifier.3.weight"),
("vqa_classifier.3.bias", "classifier.3.bias"),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
("nlvr2_classifier.0.weight", "classifier.0.weight"),
("nlvr2_classifier.0.bias", "classifier.0.bias"),
("nlvr2_classifier.1.weight", "classifier.1.weight"),
("nlvr2_classifier.1.bias", "classifier.1.bias"),
("nlvr2_classifier.3.weight", "classifier.3.weight"),
("nlvr2_classifier.3.bias", "classifier.3.bias"),
] )
else:
pass
return rename_keys
def read_in_q_k_v( state_dict , config ):
    '''simple docstring'''
    for i in range(config.num_hidden_layers ):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
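# The slicing in read_in_q_k_v splits timm's fused qkv projection into three equal
# blocks (query, key, value, in that order). A tiny standalone sketch of the same
# idea, with a toy hidden size:
#
#     import torch
#     hidden_size = 4
#     qkv = torch.randn(3 * hidden_size, hidden_size)
#     q, k, v = qkv[:hidden_size], qkv[hidden_size : 2 * hidden_size], qkv[-hidden_size:]
#     assert torch.equal(torch.cat([q, k, v]), qkv)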
def remove_classification_head_( state_dict ):
    '''simple docstring'''
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( dct , old , new ):
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint( checkpoint_url , pytorch_dump_folder_path ):
    '''simple docstring'''
    config = ViltConfig(image_size=384 , patch_size=32 , tie_word_embeddings=False )
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = "huggingface/label-files"
        filename = "vqa2-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config )
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: "False", 1: "True"}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config )
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config )
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config )
    else:
        raise ValueError("Unknown model type" )
    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="cpu" )["state_dict"]
    rename_keys = create_rename_keys(config , vqa_model , nlvr_model , irtr_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config )
    if mlm_model or irtr_model:
        ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
        for k in ignore_keys:
            state_dict.pop(k , None )
    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict , strict=False )
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict )
    # Define processor
    image_processor = ViltImageProcessor(size=384 )
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased" )
    processor = ViltProcessor(image_processor , tokenizer )
    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image_1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg" , stream=True ).raw )
        image_2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg" , stream=True ).raw )
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_1 = processor(image_1 , text , return_tensors="pt" )
        encoding_2 = processor(image_2 , text , return_tensors="pt" )
        outputs = model(
            input_ids=encoding_1.input_ids , pixel_values=encoding_1.pixel_values , pixel_values_2=encoding_2.pixel_values , )
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg" , stream=True ).raw )
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image , text , return_tensors="pt" )
        outputs = model(**encoding )
# Verify outputs
if mlm_model:
_lowerCamelCase : List[str] = torch.Size([1, 11, 30522] )
_lowerCamelCase : Dict = torch.tensor([-1_2.5_0_6_1, -1_2.5_1_2_3, -1_2.5_1_7_4] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , _lowerCamelCase , atol=1e-4 )
# verify masked token prediction equals "cats"
_lowerCamelCase : List[Any] = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
_lowerCamelCase : List[str] = torch.Size([1, 3129] )
_lowerCamelCase : List[str] = torch.tensor([-1_5.9_4_9_5, -1_8.1_4_7_2, -1_0.3_0_4_1] )
assert torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1e-4 )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , _lowerCamelCase , atol=1e-4 )
# verify vqa prediction equals "2"
_lowerCamelCase : Union[str, Any] = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
_lowerCamelCase : List[str] = torch.Size([1, 2] )
_lowerCamelCase : Optional[Any] = torch.tensor([-2.8_7_2_1, 2.1_2_9_1] )
assert torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1e-4 )
assert outputs.logits.shape == expected_shape
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
print(F"""Saving model and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowerCamelCase )
processor.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
_lowerCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
_lowerCAmelCase : Union[str, Any] = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
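# Example invocation (a sketch; the script filename and output directory are assumed,
# and the default URL above converts the MLM+ITM checkpoint):
#
#     python convert_vilt_original_to_pytorch.py \
#         --checkpoint_url https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt \
#         --pytorch_dump_folder_path ./vilt-b32-mlm-itm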
| 46 | 1 |
"""simple docstring"""
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
# (stable-diffusion, HF Diffusers)
('''time_embed.0.weight''', '''time_embedding.linear_1.weight'''),
('''time_embed.0.bias''', '''time_embedding.linear_1.bias'''),
('''time_embed.2.weight''', '''time_embedding.linear_2.weight'''),
('''time_embed.2.bias''', '''time_embedding.linear_2.bias'''),
('''input_blocks.0.0.weight''', '''conv_in.weight'''),
('''input_blocks.0.0.bias''', '''conv_in.bias'''),
('''out.0.weight''', '''conv_norm_out.weight'''),
('''out.0.bias''', '''conv_norm_out.bias'''),
('''out.2.weight''', '''conv_out.weight'''),
('''out.2.bias''', '''conv_out.bias'''),
]
unet_conversion_map_resnet = [
# (stable-diffusion, HF Diffusers)
('''in_layers.0''', '''norm1'''),
('''in_layers.2''', '''conv1'''),
('''out_layers.0''', '''norm2'''),
('''out_layers.3''', '''conv2'''),
('''emb_layers.1''', '''time_emb_proj'''),
('''skip_connection''', '''conv_shortcut'''),
]
unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f'''down_blocks.{i}.resnets.{j}.'''
        sd_down_res_prefix = f'''input_blocks.{3*i + j + 1}.0.'''
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f'''down_blocks.{i}.attentions.{j}.'''
            sd_down_atn_prefix = f'''input_blocks.{3*i + j + 1}.1.'''
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f'''up_blocks.{i}.resnets.{j}.'''
        sd_up_res_prefix = f'''output_blocks.{3*i + j}.0.'''
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f'''up_blocks.{i}.attentions.{j}.'''
            sd_up_atn_prefix = f'''output_blocks.{3*i + j}.1.'''
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f'''down_blocks.{i}.downsamplers.0.conv.'''
        sd_downsample_prefix = f'''input_blocks.{3*(i+1)}.0.op.'''
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
        # no upsample in up_blocks.3
        hf_upsample_prefix = f'''up_blocks.{i}.upsamplers.0.'''
        sd_upsample_prefix = f'''output_blocks.{3*i + 2}.{1 if i == 0 else 2}.'''
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
hf_mid_atn_prefix = '''mid_block.attentions.0.'''
sd_mid_atn_prefix = '''middle_block.1.'''
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
    hf_mid_res_prefix = f'''mid_block.resnets.{j}.'''
    sd_mid_res_prefix = f'''middle_block.{2*j}.'''
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict( unet_state_dict ):
    '''simple docstring'''
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part , sd_part )
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part , sd_part )
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
# ================#
# VAE Conversion #
# ================#
vae_conversion_map = [
# (stable-diffusion, HF Diffusers)
('''nin_shortcut''', '''conv_shortcut'''),
('''norm_out''', '''conv_norm_out'''),
('''mid.attn_1.''', '''mid_block.attentions.0.'''),
]
for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f'''encoder.down_blocks.{i}.resnets.{j}.'''
        sd_down_prefix = f'''encoder.down.{i}.block.{j}.'''
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
    if i < 3:
        hf_downsample_prefix = f'''down_blocks.{i}.downsamplers.0.'''
        sd_downsample_prefix = f'''down.{i}.downsample.'''
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
        hf_upsample_prefix = f'''up_blocks.{i}.upsamplers.0.'''
        sd_upsample_prefix = f'''up.{3-i}.upsample.'''
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f'''decoder.up_blocks.{i}.resnets.{j}.'''
        sd_up_prefix = f'''decoder.up.{3-i}.block.{j}.'''
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f'''mid_block.resnets.{i}.'''
    sd_mid_res_prefix = f'''mid.block_{i+1}.'''
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
vae_conversion_map_attn = [
# (stable-diffusion, HF Diffusers)
('''norm.''', '''group_norm.'''),
('''q.''', '''query.'''),
('''k.''', '''key.'''),
('''v.''', '''value.'''),
('''proj_out.''', '''proj_attn.'''),
]
def reshape_weight_for_sd( w ):
    '''simple docstring'''
    return w.reshape(*w.shape , 1 , 1 )
def convert_vae_state_dict( vae_state_dict ):
    '''simple docstring'''
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part , sd_part )
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part , sd_part )
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if F"""mid.attn_1.{weight_name}.weight""" in k:
                print(F"""Reshaping {k} for SD format""" )
                new_state_dict[k] = reshape_weight_for_sd(v )
    return new_state_dict
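# reshape_weight_for_sd maps the 2D nn.Linear attention weights that diffusers uses
# back to the 4D 1x1-convolution layout of the original VAE. A tiny sketch:
#
#     import torch
#     w = torch.zeros(512, 512)
#     assert reshape_weight_for_sd(w).shape == (512, 512, 1, 1)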
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
# (stable-diffusion, HF Diffusers)
('''resblocks.''', '''text_model.encoder.layers.'''),
('''ln_1''', '''layer_norm1'''),
('''ln_2''', '''layer_norm2'''),
('''.c_fc.''', '''.fc1.'''),
('''.c_proj.''', '''.fc2.'''),
('''.attn''', '''.self_attn'''),
('''ln_final.''', '''transformer.text_model.final_layer_norm.'''),
('''token_embedding.weight''', '''transformer.text_model.embeddings.token_embedding.weight'''),
('''positional_embedding''', '''transformer.text_model.embeddings.position_embedding.weight'''),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile('''|'''.join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {'''q''': 0, '''k''': 1, '''v''': 2}
def convert_text_enc_state_dict_v20( text_enc_dict ):
    '''simple docstring'''
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight" )
            or k.endswith(".self_attn.k_proj.weight" )
            or k.endswith(".self_attn.v_proj.weight" )
        ):
            k_pre = k[: -len(".q_proj.weight" )]
            k_code = k[-len("q_proj.weight" )]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue
        if (
            k.endswith(".self_attn.q_proj.bias" )
            or k.endswith(".self_attn.k_proj.bias" )
            or k.endswith(".self_attn.v_proj.bias" )
        ):
            k_pre = k[: -len(".q_proj.bias" )]
            k_code = k[-len("q_proj.bias" )]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue
        relabelled_key = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , k )
        new_state_dict[relabelled_key] = v
    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing" )
        relabelled_key = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , k_pre )
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors )
    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing" )
        relabelled_key = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , k_pre )
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors )
    return new_state_dict
def convert_text_enc_state_dict( text_enc_dict ):
    '''simple docstring'''
    return text_enc_dict
if __name__ == "__main__":
_lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''')
parser.add_argument(
'''--use_safetensors''', action='''store_true''', help='''Save weights use safetensors, default is ckpt.'''
)
_lowerCAmelCase : str = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
    unet_path = osp.join(args.model_path, '''unet''', '''diffusion_pytorch_model.safetensors''')
    vae_path = osp.join(args.model_path, '''vae''', '''diffusion_pytorch_model.safetensors''')
    text_enc_path = osp.join(args.model_path, '''text_encoder''', '''model.safetensors''')
    # Load from safetensors if present, otherwise fall back to the PyTorch .bin files
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device='''cpu''')
    else:
        unet_path = osp.join(args.model_path, '''unet''', '''diffusion_pytorch_model.bin''')
        unet_state_dict = torch.load(unet_path, map_location='''cpu''')
    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device='''cpu''')
    else:
        vae_path = osp.join(args.model_path, '''vae''', '''diffusion_pytorch_model.bin''')
        vae_state_dict = torch.load(vae_path, map_location='''cpu''')
    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device='''cpu''')
    else:
        text_enc_path = osp.join(args.model_path, '''text_encoder''', '''pytorch_model.bin''')
        text_enc_dict = torch.load(text_enc_path, map_location='''cpu''')
    # Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {'''model.diffusion_model.''' + k: v for k, v in unet_state_dict.items()}
    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {'''first_stage_model.''' + k: v for k, v in vae_state_dict.items()}
    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_v20_model = '''text_model.encoder.layers.22.layer_norm2.bias''' in text_enc_dict
    if is_v20_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {'''transformer.''' + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_v20(text_enc_dict)
        text_enc_dict = {'''cond_stage_model.model.''' + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {'''cond_stage_model.transformer.''' + k: v for k, v in text_enc_dict.items()}
    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}
    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {'''state_dict''': state_dict}
        torch.save(state_dict, args.checkpoint_path)
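    # Example invocation (a sketch; the script filename and the paths are hypothetical):
    #
    #     python convert_diffusers_to_original_stable_diffusion.py \
    #         --model_path ./my-diffusers-model --checkpoint_path ./model.ckpt --half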
| 46 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string( string1: str , string2: str ) -> str | Literal[False]:
    '''simple docstring'''
    list1 = list(string1 )
    list2 = list(string2 )
    count = 0
    for i in range(len(list1 ) ):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1 )
def check( binary: list[str] ) -> list[str]:
    '''simple docstring'''
    pi = []
    while True:
        checka = ["$"] * len(binary )
        temp = []
        for i in range(len(binary ) ):
            for j in range(i + 1 , len(binary ) ):
                k = compare_string(binary[i] , binary[j] )
                if k is False:
                    checka[i] = "*"
                    checka[j] = "*"
                    temp.append("X" )
        for i in range(len(binary ) ):
            if checka[i] == "$":
                pi.append(binary[i] )
        if len(temp ) == 0:
            return pi
        binary = list(set(temp ) )
def decimal_to_binary( no_of_variable: int , minterms: Sequence[float] ) -> list[str]:
    '''simple docstring'''
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable ):
            string = str(minterm % 2 ) + string
            minterm //= 2
        temp.append(string )
    return temp
def is_for_table( string1: str , string2: str , count: int ) -> bool:
    '''simple docstring'''
    list1 = list(string1 )
    list2 = list(string2 )
    count_n = 0
    for i in range(len(list1 ) ):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count
def selection( chart: list[list[int]] , prime_implicants: list[str] ) -> list[str]:
    '''simple docstring'''
    temp = []
    select = [0] * len(chart )
    for i in range(len(chart[0] ) ):
        count = 0
        rem = -1
        for j in range(len(chart ) ):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(chart ) ):
        if select[i] == 1:
            for j in range(len(chart[0] ) ):
                if chart[i][j] == 1:
                    for k in range(len(chart ) ):
                        chart[k][j] = 0
            temp.append(prime_implicants[i] )
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart ) ):
            count_n = chart[i].count(1 )
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem] )
        for i in range(len(chart[0] ) ):
            if chart[rem][i] == 1:
                for j in range(len(chart ) ):
                    chart[j][i] = 0
def prime_implicant_chart( prime_implicants: list[str] , binary: list[str] ) -> list[list[int]]:
    '''simple docstring'''
    chart = [[0 for x in range(len(binary ) )] for x in range(len(prime_implicants ) )]
    for i in range(len(prime_implicants ) ):
        count = prime_implicants[i].count("_" )
        for j in range(len(binary ) ):
            if is_for_table(prime_implicants[i] , binary[j] , count ):
                chart[i][j] = 1
    return chart
def main() -> None:
    '''simple docstring'''
    no_of_variable = int(input("Enter the no. of variables\n" ) )
    minterms = [
        float(x )
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n" ).split()
    ]
    binary = decimal_to_binary(no_of_variable , minterms )
    prime_implicants = check(binary )
    print("Prime Implicants are:" )
    print(prime_implicants )
    chart = prime_implicant_chart(prime_implicants , binary )
    essential_prime_implicants = selection(chart , prime_implicants )
    print("Essential Prime Implicants are:" )
    print(essential_prime_implicants )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 46 | 1 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class A_ ( _a ):
lowerCAmelCase__ = ['image_processor', 'tokenizer']
lowerCAmelCase__ = 'AutoImageProcessor'
lowerCAmelCase__ = 'AutoTokenizer'
    def __init__( self ,image_processor ,tokenizer ):
        '''simple docstring'''
        super().__init__(image_processor ,tokenizer )
        self.current_processor = self.image_processor
    def __call__( self ,text=None ,images=None ,return_tensors=None ,**kwargs ):
        '''simple docstring'''
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none." )
        if text is not None:
            encoding = self.tokenizer(text ,return_tensors=return_tensors ,**kwargs )
        if images is not None:
            image_features = self.image_processor(images ,return_tensors=return_tensors ,**kwargs )
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) ,tensor_type=return_tensors )
def _lowercase ( self: Dict ,*__lowerCAmelCase: Optional[Any] ,**__lowerCAmelCase: Optional[int] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*__lowerCAmelCase ,**__lowerCAmelCase )
def _lowercase ( self: List[str] ,*__lowerCAmelCase: List[Any] ,**__lowerCAmelCase: Union[str, Any] ):
'''simple docstring'''
return self.tokenizer.decode(*__lowerCAmelCase ,**__lowerCAmelCase )
@property
def _lowercase ( self: Dict ):
'''simple docstring'''
return ["input_ids", "attention_mask", "pixel_values"]
| 46 |
"""simple docstring"""
from __future__ import annotations
from random import random
class A_ :
    def __init__( self ,value: int | None = None ):
        '''simple docstring'''
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None
def __repr__( self: Tuple ):
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return F"""'{self.value}: {self.prior:.5}'"""
else:
return pformat(
{F"""{self.value}: {self.prior:.5}""": (self.left, self.right)} ,indent=1 )
def __str__( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : Tuple = str(self.value ) + " "
_lowerCamelCase : Optional[Any] = str(self.left or "" )
_lowerCamelCase : int = str(self.right or "" )
return value + left + right
def split( root: Node | None , value: int ) -> tuple[Node | None, Node | None]:
    '''simple docstring'''
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left , value )
            return left, root
        else:
            root.right, right = split(root.right , value )
            return root, right
def merge( left: Node | None , right: Node | None ) -> Node | None:
    '''simple docstring'''
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right , right )
        return left
    else:
        right.left = merge(left , right.left )
        return right
def insert( root: Node | None , value: int ) -> Node | None:
    '''simple docstring'''
    node = Node(value )
    left, right = split(root , value )
    return merge(merge(left , node ) , right )
def erase( root: Node | None , value: int ) -> Node | None:
    '''simple docstring'''
    left, right = split(root , value - 1 )
    _, right = split(right , value )
    return merge(left , right )
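# A quick sketch of how the treap primitives above compose:
#
#     root = None
#     for x in (5, 3, 8):
#         root = insert(root, x)
#     inorder(root)          # prints 3,5,8,
#     root = erase(root, 5)
#     inorder(root)          # prints 3,8,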
def inorder( root: Node | None ) -> None:
'''simple docstring'''
if not root: # None
return
else:
inorder(root.left )
print(root.value , end="," )
inorder(root.right )
def interact_treap( root: Node | None , args: str ) -> Node | None:
    '''simple docstring'''
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root , int(arg[1:] ) )
        elif arg[0] == "-":
            root = erase(root , int(arg[1:] ) )
        else:
            print("Unknown command" )
return root
def main() -> None:
    '''simple docstring'''
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. " )
    args = input()
    while args != "q":
        root = interact_treap(root , args )
        print(root )
        args = input()
    print("good bye!" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 46 | 1 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
_lowerCAmelCase : Union[str, Any] = {
'''microsoft/unispeech-sat-base-100h-libri-ft''': (
'''https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'''
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class A_ ( _a ):
lowerCAmelCase__ = 'unispeech-sat'
    def __init__( self ,vocab_size=32 ,hidden_size=768 ,num_hidden_layers=12 ,num_attention_heads=12 ,intermediate_size=3_072 ,hidden_act="gelu" ,hidden_dropout=0.1 ,activation_dropout=0.1 ,attention_dropout=0.1 ,feat_proj_dropout=0.0 ,feat_quantizer_dropout=0.0 ,final_dropout=0.1 ,layerdrop=0.1 ,initializer_range=0.02 ,layer_norm_eps=1e-5 ,feat_extract_norm="group" ,feat_extract_activation="gelu" ,conv_dim=(512, 512, 512, 512, 512, 512, 512) ,conv_stride=(5, 2, 2, 2, 2, 2, 2) ,conv_kernel=(10, 3, 3, 3, 3, 2, 2) ,conv_bias=False ,num_conv_pos_embeddings=128 ,num_conv_pos_embedding_groups=16 ,do_stable_layer_norm=False ,apply_spec_augment=True ,mask_time_prob=0.05 ,mask_time_length=10 ,mask_time_min_masks=2 ,mask_feature_prob=0.0 ,mask_feature_length=10 ,mask_feature_min_masks=0 ,num_codevectors_per_group=320 ,num_codevector_groups=2 ,contrastive_logits_temperature=0.1 ,num_negatives=100 ,codevector_dim=256 ,proj_codevector_dim=256 ,diversity_loss_weight=0.1 ,ctc_loss_reduction="mean" ,ctc_zero_infinity=False ,use_weighted_layer_sum=False ,classifier_proj_size=256 ,tdnn_dim=(512, 512, 512, 512, 1_500) ,tdnn_kernel=(5, 3, 3, 1, 1) ,tdnn_dilation=(1, 2, 3, 1, 1) ,xvector_output_dim=512 ,pad_token_id=0 ,bos_token_id=1 ,eos_token_id=2 ,num_clusters=504 ,**kwargs ,):
        '''simple docstring'''
        super().__init__(**kwargs ,pad_token_id=pad_token_id ,bos_token_id=bos_token_id ,eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
                F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim )
        self.tdnn_kernel = list(tdnn_kernel )
        self.tdnn_dilation = list(tdnn_dilation )
        self.xvector_output_dim = xvector_output_dim
@property
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
return functools.reduce(operator.mul ,self.conv_stride ,1 )
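# With the default conv_stride of (5, 2, 2, 2, 2, 2, 2), the property above evaluates
# to 5 * 2**6 == 320, i.e. the feature encoder downsamples raw audio by a factor of 320:
#
#     import functools, operator
#     assert functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320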
| 46 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCAmelCase : Dict = get_tests_dir('''fixtures/test_sentencepiece_bpe_char.model''')
@require_sentencepiece
@require_tokenizers
class A_ ( _a , unittest.TestCase ):
lowerCAmelCase__ = SpeechTaTokenizer
lowerCAmelCase__ = False
lowerCAmelCase__ = True
def _lowercase ( self: List[Any] ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = SpeechTaTokenizer(_lowerCAmelCase )
        mask_token = AddedToken("<mask>" ,lstrip=True ,rstrip=False )
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token} )
tokenizer.add_tokens(["<ctc_blank>"] )
tokenizer.save_pretrained(self.tmpdirname )
def _lowercase ( self: List[str] ,__lowerCAmelCase: str ):
'''simple docstring'''
_lowerCamelCase : Dict = "this is a test"
_lowerCamelCase : Optional[Any] = "this is a test"
return input_text, output_text
    def _lowercase ( self ,tokenizer ,with_prefix_space=False ,max_length=20 ,min_length=5 ):
        '''simple docstring'''
        input_text, output_text = self.get_input_output_texts(tokenizer )
        ids = tokenizer.encode(output_text ,add_special_tokens=False )
        text = tokenizer.decode(ids ,clean_up_tokenization_spaces=False )
        return text, ids
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = "<pad>"
_lowerCamelCase : List[str] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCAmelCase ) ,__lowerCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCAmelCase ) ,__lowerCAmelCase )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] ,"<s>" )
        self.assertEqual(vocab_keys[1] ,"<pad>" )
        self.assertEqual(vocab_keys[-4] ,"œ" )
        self.assertEqual(vocab_keys[-2] ,"<mask>" )
        self.assertEqual(vocab_keys[-1] ,"<ctc_blank>" )
        self.assertEqual(len(vocab_keys ) ,81 )
def _lowercase ( self: Dict ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size ,79 )
def _lowercase ( self: Any ):
'''simple docstring'''
        tokenizers = self.get_tokenizers(do_lower_case=False )
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer )
                self.assertNotEqual(vocab_size ,0 )
                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)
                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks )
                vocab_size_a = tokenizer.vocab_size
                all_size_a = len(tokenizer )
                self.assertNotEqual(vocab_size_a ,0 )
                self.assertEqual(vocab_size ,vocab_size_a )
                self.assertEqual(added_toks ,len(new_toks ) )
                self.assertEqual(all_size_a ,all_size + len(new_toks ) )
                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l" ,add_special_tokens=False )
                self.assertGreaterEqual(len(tokens ) ,4 )
                self.assertGreater(tokens[0] ,tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[-3] ,tokenizer.vocab_size - 1 )
                new_toks_a = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_a = tokenizer.add_special_tokens(new_toks_a )
                vocab_size_b = tokenizer.vocab_size
                all_size_b = len(tokenizer )
                self.assertNotEqual(vocab_size_b ,0 )
                self.assertEqual(vocab_size ,vocab_size_b )
                self.assertEqual(added_toks_a ,len(new_toks_a ) )
                self.assertEqual(all_size_b ,all_size_a + len(new_toks_a ) )
                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l" ,add_special_tokens=False )
                self.assertGreaterEqual(len(tokens ) ,6 )
                self.assertGreater(tokens[0] ,tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[0] ,tokens[1] )
                self.assertGreater(tokens[-3] ,tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[-3] ,tokens[-4] )
                self.assertEqual(tokens[0] ,tokenizer.eos_token_id )
                self.assertEqual(tokens[-3] ,tokenizer.pad_token_id )
def _lowercase ( self: Any ):
'''simple docstring'''
pass
def _lowercase ( self: Tuple ):
'''simple docstring'''
pass
def _lowercase ( self: str ):
'''simple docstring'''
        tokenizer = self.get_tokenizer()
        tokens = tokenizer.tokenize("This is a test" )
        # fmt: off
        self.assertListEqual(tokens ,[SPIECE_UNDERLINE, "T", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "a", SPIECE_UNDERLINE, "t", "e", "s", "t"] )
        # fmt: on
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) ,[4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] ,)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            tokens ,[SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "92000", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        # fmt: off
        self.assertListEqual(ids ,[4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
        # fmt: on
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens ,[SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "<unk>", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] )
@slow
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = [
"Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
"general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
"Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained "
"models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.",
"BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
"conditioning on both left and right context in all layers.",
"The quick brown fox jumps over the lazy dog.",
]
# fmt: off
_lowerCamelCase : Tuple = {
"input_ids": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding ,model_name="microsoft/speecht5_asr" ,revision="c5ef64c71905caeccde0e4462ef3f9077224c524" ,sequences=sequences ,)
| 46 | 1 |
"""simple docstring"""
from jiwer import compute_measures
import datasets
_CITATION = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
_DESCRIPTION = '''\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of recognition errors, and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Empirically, the relationship between word error rate and language-model perplexity is often described by a power law.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
'''
_KWARGS_DESCRIPTION = '''
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> wer = datasets.load_metric("wer")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class WER( datasets.Metric ):
    def _info( self: Union[str, Any] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"predictions": datasets.Value("string" ,id="sequence" ),
"references": datasets.Value("string" ,id="sequence" ),
} ) ,codebase_urls=["https://github.com/jitsi/jiwer/"] ,reference_urls=[
"https://en.wikipedia.org/wiki/Word_error_rate",
] ,)
    def _compute( self: List[Any] ,predictions=None ,references=None ,concatenate_texts=False ):
        '''simple docstring'''
        if concatenate_texts:
            return compute_measures(references ,predictions )["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions ,references ):
                measures = compute_measures(reference ,prediction )
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
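

# Illustration (a minimal sketch, not part of the metric class): the WER formula
# above, WER = (S + D + I) / (S + D + C), computed directly from jiwer's counts.
if __name__ == "__main__":
    measures = compute_measures("this is the reference", "this is the prediction")
    errors = measures["substitutions"] + measures["deletions"] + measures["insertions"]
    n_ref_words = measures["substitutions"] + measures["deletions"] + measures["hits"]
    print(errors / n_ref_words)  # 0.25: one substitution over four reference words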
| 46 |
"""simple docstring"""
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
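

# A minimal sketch (illustrative assumption, not the actual dummy-object file) of
# what the `dummy_*_objects` fallbacks above stand in for: a class that imports
# cleanly but raises only on use, so a missing backend fails lazily and loudly.
class _DummySchedulerSketch:
    def __init__(self, *args, **kwargs):
        raise ImportError("This scheduler requires the `torch` backend; please install torch.")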
| 46 | 1 |
"""simple docstring"""
def jaro_winkler( str_a: str ,str_b: str ) -> float:
    '''simple docstring'''

    def get_matched_characters( _str_a: str ,_str_b: str ) -> str:
        matched = []
        limit = min(len(_str_a ) ,len(_str_b ) ) // 2
        for i, l in enumerate(_str_a ):
            left = int(max(0 ,i - limit ) )
            right = int(min(i + limit + 1 ,len(_str_b ) ) )
            if l in _str_b[left:right]:
                matched.append(l )
                # blank out the matched character so it cannot be matched twice
                _str_b = F"""{_str_b[0:_str_b.index(l )]} {_str_b[_str_b.index(l ) + 1:]}"""
        return "".join(matched )

    # matching characters
    matching_a = get_matched_characters(str_a ,str_b )
    matching_b = get_matched_characters(str_b ,str_a )
    match_count = len(matching_a )

    # transposition
    transpositions = (
        len([(ca, cb) for ca, cb in zip(matching_a ,matching_b ) if ca != cb] ) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        # Jaro similarity: (1/3) * (m/|s1| + m/|s2| + (m - t)/m)
        jaro = (
            1
            / 3
            * (
                match_count / len(str_a )
                + match_count / len(str_b )
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for ca, cb in zip(str_a[:4] ,str_b[:4] ):
        if ca == cb:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(jaro_winkler('''hello''', '''world'''))
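    # Worked example: for "martha" vs "marhta" there are m=6 matches, t=1
    # transposition and a shared 3-character prefix, so jaro = 17/18 ≈ 0.944
    # and jaro_winkler ≈ 0.944 + 0.1 * 3 * (1 - 0.944) ≈ 0.961.
    print(jaro_winkler('''martha''', '''marhta'''))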
| 46 |
"""simple docstring"""
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest( SchedulerCommonTest ):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (('eta', 0.0), ('num_inference_steps', 50))

    def get_scheduler_config( self ,**kwargs ):
        '''simple docstring'''
        config = {
            "num_train_timesteps": 1_000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }
        config.update(**kwargs )
        return config

    def full_loop( self ,**config ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps, eta = 10, 0.0
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for t in scheduler.timesteps:
            residual = model(sample ,t )
            sample = scheduler.step(residual ,t ,sample ,eta ).prev_sample
        return sample
    def test_timesteps( self ):
        '''simple docstring'''
        for timesteps in [100, 500, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps )

    def test_steps_offset( self ):
        '''simple docstring'''
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset )
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1 )
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(5 )
        assert torch.equal(scheduler.timesteps ,torch.LongTensor([801, 601, 401, 201, 1] ) )

    def test_betas( self ):
        '''simple docstring'''
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] ,[0.002, 0.02, 0.2, 2] ):
            self.check_over_configs(beta_start=beta_start ,beta_end=beta_end )

    def test_schedules( self ):
        '''simple docstring'''
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule )

    def test_prediction_type( self ):
        '''simple docstring'''
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )

    def test_clip_sample( self ):
        '''simple docstring'''
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample )

    def test_timestep_spacing( self ):
        '''simple docstring'''
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing )

    def test_rescale_betas_zero_snr( self ):
        '''simple docstring'''
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr )

    def test_thresholding( self ):
        '''simple docstring'''
        self.check_over_configs(thresholding=False )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True ,prediction_type=prediction_type ,sample_max_value=threshold ,)

    def test_time_indices( self ):
        '''simple docstring'''
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t )

    def test_inference_steps( self ):
        '''simple docstring'''
        for t, num_inference_steps in zip([1, 10, 50] ,[10, 50, 500] ):
            self.check_over_forward(time_step=t ,num_inference_steps=num_inference_steps )

    def test_eta( self ):
        '''simple docstring'''
        for t, eta in zip([1, 10, 49] ,[0.0, 0.5, 1.0] ):
            self.check_over_forward(time_step=t ,eta=eta )
    def test_variance( self ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        assert torch.sum(torch.abs(scheduler._get_variance(0 ,0 ) - 0.0 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420 ,400 ) - 0.14771 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980 ,960 ) - 0.32460 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0 ,0 ) - 0.0 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 ,486 ) - 0.00979 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 ,998 ) - 0.02 ) ) < 1e-5
    def test_batch_step_no_noise( self ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps )
        model = self.dummy_model()
        sample_a = self.dummy_sample_deter
        sample_b = self.dummy_sample_deter + 0.1
        sample_c = self.dummy_sample_deter - 0.1
        per_sample_batch = sample_a.shape[0]
        samples = torch.stack([sample_a, sample_b, sample_c] ,dim=0 )
        timesteps = torch.arange(num_inference_steps )[0:3, None].repeat(1 ,per_sample_batch )
        residual = model(samples.flatten(0 ,1 ) ,timesteps.flatten(0 ,1 ) )
        pred_prev_sample = scheduler.batch_step_no_noise(residual ,timesteps.flatten(0 ,1 ) ,samples.flatten(0 ,1 ) ,eta )
        result_sum = torch.sum(torch.abs(pred_prev_sample ) )
        result_mean = torch.mean(torch.abs(pred_prev_sample ) )
        assert abs(result_sum.item() - 1147.7904 ) < 1e-2
        assert abs(result_mean.item() - 0.4982 ) < 1e-3
    def test_full_loop_no_noise( self ):
        '''simple docstring'''
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 172.0067 ) < 1e-2
        assert abs(result_mean.item() - 0.223967 ) < 1e-3
    def test_full_loop_with_v_prediction( self ):
        '''simple docstring'''
        sample = self.full_loop(prediction_type="v_prediction" )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 52.5302 ) < 1e-2
        assert abs(result_mean.item() - 0.0684 ) < 1e-3
    def test_full_loop_with_set_alpha_to_one( self ):
        '''simple docstring'''
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True ,beta_start=0.01 )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 149.8295 ) < 1e-2
        assert abs(result_mean.item() - 0.1951 ) < 1e-3
    def test_full_loop_with_no_set_alpha_to_one( self ):
        '''simple docstring'''
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False ,beta_start=0.01 )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 149.0784 ) < 1e-2
        assert abs(result_mean.item() - 0.1941 ) < 1e-3
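

# A minimal standalone sketch (not part of the test class) of the denoising loop
# these tests exercise; a zero tensor stands in for a real UNet's noise prediction.
if __name__ == "__main__":
    scheduler = DDIMParallelScheduler(num_train_timesteps=1_000, beta_schedule="linear")
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        noise_pred = torch.zeros_like(sample)  # stand-in for model(sample, t)
        sample = scheduler.step(noise_pred, t, sample, 0.0).prev_sample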
| 46 | 1 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    config_name: Optional[str] = field(
        default=None , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    cache_dir: Optional[str] = field(
        default=None , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
    use_fast_tokenizer: bool = field(
        default=True , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
    model_revision: str = field(
        default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
    use_auth_token: bool = field(
        default=False , metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        } , )
@dataclass
class DataTrainingArguments:
    train_file: Optional[str] = field(default=None , metadata={'help': 'The input training data file (a text file).'} )
    validation_file: Optional[str] = field(
        default=None , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , )
    overwrite_cache: bool = field(
        default=False , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
    preprocessing_num_workers: Optional[int] = field(
        default=None , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
    max_seq_length: Optional[int] = field(
        default=None , metadata={
            'help': (
                'The maximum total input sequence length after tokenization. If passed, sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    pad_to_max_length: bool = field(
        default=False , metadata={
            'help': (
                'Whether to pad all samples to the maximum sentence length. '
                'If False, will pad the samples dynamically when batching to the maximum length in the batch. More '
                'efficient on GPU but very bad for TPU.'
            )
        } , )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        } , )

    def __post_init__( self ):
        '''simple docstring'''
        if self.train_file is not None:
            extension = self.train_file.split("." )[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split("." )[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__( self ,features ):
        '''simple docstring'''
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name ) for feature in features]
        batch_size = len(features )
        num_choices = len(features[0]["input_ids"] )
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices )] for feature in features
        ]
        flattened_features = list(chain(*flattened_features ) )
        batch = self.tokenizer.pad(
            flattened_features ,padding=self.padding ,max_length=self.max_length ,pad_to_multiple_of=self.pad_to_multiple_of ,return_tensors="pt" ,)
        # Un-flatten
        batch = {k: v.view(batch_size ,num_choices ,-1 ) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels ,dtype=torch.int64 )
        return batch
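

# Shape walk-through (illustrative): with batch size B and 4 endings per example,
# the collator pads B * 4 flattened sequences in a single tokenizer.pad call and
# then views each padded tensor back to (B, 4, seq_len), so the model can score
# all four candidate endings of an example jointly.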
def main():
    '''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag" , model_args , data_args )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    datasets.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split("." )[-1]
        raw_datasets = load_dataset(
            extension , data_files=data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag" , "regular" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [F"""ending{i}""" for i in range(4 )]
    context_name = "sent1"
    question_header_name = "sent2"
if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
if max_seq_length > 1024:
logger.warning(
"The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
" override this default with `--block_size xxx`." )
            max_seq_length = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
        max_seq_length = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
    def preprocess_function(examples ):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [F"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(question_headers )
        ]
        # Flatten out
        first_sentences = list(chain(*first_sentences ) )
        second_sentences = list(chain(*second_sentences ) )
        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences , second_sentences , truncation=True , max_length=max_seq_length , padding="max_length" if data_args.pad_to_max_length else False , )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0 , len(v ) , 4 )] for k, v in tokenized_examples.items()}
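
    # Illustrative: for one SWAG example the tokenizer call above produces four
    # (sent1, sent2 + ending_i) pairs; the v[i : i + 4] slicing re-nests the flat
    # list so each dataset row keeps its four candidate sequences together.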
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset" )
_lowerCamelCase : str = raw_datasets["train"]
if data_args.max_train_samples is not None:
_lowerCamelCase : str = min(len(_lowerCamelCase ) , data_args.max_train_samples )
_lowerCamelCase : List[str] = train_dataset.select(range(_lowerCamelCase ) )
with training_args.main_process_first(desc="train dataset map pre-processing" ):
_lowerCamelCase : str = train_dataset.map(
_lowerCamelCase , batched=_lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset" )
_lowerCamelCase : str = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
_lowerCamelCase : int = min(len(_lowerCamelCase ) , data_args.max_eval_samples )
_lowerCamelCase : int = eval_dataset.select(range(_lowerCamelCase ) )
with training_args.main_process_first(desc="validation dataset map pre-processing" ):
_lowerCamelCase : Optional[Any] = eval_dataset.map(
_lowerCamelCase , batched=_lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer , pad_to_multiple_of=8 if training_args.fp16 else None )
    )

    # Metric
    def compute_metrics(eval_predictions ):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions , axis=1 )
        return {"accuracy": (preds == label_ids).astype(np.float32 ).mean().item()}

    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=tokenizer , data_collator=data_collator , compute_metrics=compute_metrics , )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset )
        )
        metrics["train_samples"] = min(max_train_samples , len(train_dataset ) )
        trainer.log_metrics("train" , metrics )
        trainer.save_metrics("train" , metrics )
        trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset )
        metrics["eval_samples"] = min(max_eval_samples , len(eval_dataset ) )
        trainer.log_metrics("eval" , metrics )
        trainer.save_metrics("eval" , metrics )

    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
def _mp_fn(index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 46 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
_lowerCAmelCase : int = {
'''google/bit-50''': '''https://huggingface.co/google/bit-50/resolve/main/config.json''',
}
class BitConfig( BackboneConfigMixin , PretrainedConfig ):
    model_type = 'bit'
    layer_types = ['preactivation', 'bottleneck']
    supported_padding = ['SAME', 'VALID']

    def __init__( self ,num_channels=3 ,embedding_size=64 ,hidden_sizes=[256, 512, 1_024, 2_048] ,depths=[3, 4, 6, 3] ,layer_type="preactivation" ,hidden_act="relu" ,global_padding=None ,num_groups=32 ,drop_path_rate=0.0 ,embedding_dynamic_padding=False ,output_stride=32 ,width_factor=1 ,out_features=None ,out_indices=None ,**kwargs ,):
        '''simple docstring'''
        super().__init__(**kwargs )
        if layer_type not in self.layer_types:
            raise ValueError(F"""layer_type={layer_type} is not one of {','.join(self.layer_types )}""" )
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(F"""Padding strategy {global_padding} not supported""" )
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        self.stage_names = ["stem"] + [F"""stage{idx}""" for idx in range(1 ,len(depths ) + 1 )]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features ,out_indices=out_indices ,stage_names=self.stage_names )
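

# Usage sketch (illustrative): the validation above normalizes padding strings
# and rejects unknown layer types, e.g.
#   BitConfig(global_padding="same").global_padding == "SAME"
#   BitConfig(layer_type="plain")  # raises ValueError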
| 46 | 1 |
"""simple docstring"""
import os
from distutils.util import strtobool
def get_int_from_env( env_keys , default ) -> int:
    '''simple docstring'''
    for e in env_keys:
        val = int(os.environ.get(e , -1 ) )
        if val >= 0:
            return val
    return default


def parse_flag_from_env( key , default=False ) -> bool:
    '''simple docstring'''
    value = os.environ.get(key , str(default ) )
    return strtobool(value ) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env( key , default="no" ) -> str:
    '''simple docstring'''
    value = os.environ.get(key , str(default ) )
    return value
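

# Usage sketch (illustrative; the environment variable names are hypothetical):
#   os.environ["MY_DEBUG_FLAG"] = "1"
#   parse_flag_from_env("MY_DEBUG_FLAG")               # -> True
#   get_int_from_env(["WORLD_SIZE", "SLURM_NTASKS"], 1)  # first value >= 0 wins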
| 46 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : List[str] = {
'''google/vivit-b-16x2-kinetics400''': (
'''https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'''
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class VivitConfig( PretrainedConfig ):
    model_type = 'vivit'

    def __init__( self ,image_size=224 ,num_frames=32 ,tubelet_size=[2, 16, 16] ,num_channels=3 ,hidden_size=768 ,num_hidden_layers=12 ,num_attention_heads=12 ,intermediate_size=3_072 ,hidden_act="gelu_fast" ,hidden_dropout_prob=0.0 ,attention_probs_dropout_prob=0.0 ,initializer_range=0.02 ,layer_norm_eps=1e-06 ,qkv_bias=True ,**kwargs ,):
        '''simple docstring'''
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs )
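

# Illustrative: a 32-frame 224x224 clip with tubelet_size=[2, 16, 16] is split
# into (32 / 2) * (224 / 16) * (224 / 16) = 3_136 tubelet tokens.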
| 46 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__( self ,parent ,batch_size=12 ,seq_length=7 ,is_training=True ,use_input_mask=True ,use_labels=True ,vocab_size=99 ,hidden_size=32 ,projection_dim=32 ,num_hidden_layers=2 ,num_attention_heads=4 ,intermediate_size=37 ,dropout=0.1 ,attention_dropout=0.1 ,max_position_embeddings=512 ,initializer_range=0.02 ,bos_token_id=0 ,scope=None ,):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            # simulate variable-length inputs: keep a random-length prefix of each
            # mask set to 1 and zero out the rest
            rnd_start_indices = np.random.randint(1 ,seq_length - 1 ,size=(batch_size,) )
            for batch_idx, start_index in enumerate(rnd_start_indices ):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0
        config = self.get_config()
        return config, input_ids, tf.convert_to_tensor(input_mask )

    def get_config( self ):
        '''simple docstring'''
        return BlipTextConfig(
            vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,projection_dim=self.projection_dim ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,dropout=self.dropout ,attention_dropout=self.attention_dropout ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,bos_token_id=self.bos_token_id ,)

    def create_and_check_model( self ,config ,input_ids ,input_mask ):
        '''simple docstring'''
        model = TFBlipTextModel(config=config )
        result = model(input_ids ,attention_mask=input_mask ,training=False )
        result = model(input_ids ,training=False )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )

    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFBlipTextModelTest( TFModelTesterMixin , unittest.TestCase ):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    test_onnx = False
    test_pruning = False
    test_head_masking = False

    def setUp( self ):
        '''simple docstring'''
        self.model_tester = BlipTextModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=BlipTextConfig ,hidden_size=37 )

    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def test_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_training( self ):
        '''simple docstring'''
        pass

    def test_training_gradient_checkpointing( self ):
        '''simple docstring'''
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds" )
    def test_inputs_embeds( self ):
        '''simple docstring'''
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" )
    def test_save_load_fast_init_from_base( self ):
        '''simple docstring'''
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" )
    def test_save_load_fast_init_to_base( self ):
        '''simple docstring'''
        pass

    @slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name )
            self.assertIsNotNone(model )

    def test_pt_tf_model_equivalence( self ,allow_missing_keys=True ):
        '''simple docstring'''
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys )
| 46 |
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp( self ):
        '''simple docstring'''
        super().setUp()
        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab ,range(len(vocab ) ) ) )
        self.vocab_file = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file ,"w" ,encoding="utf-8" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + "\n" )

    def get_tokenizer( self ,**kwargs ):
        '''simple docstring'''
        return MgpstrTokenizer.from_pretrained(self.tmpdirname ,**kwargs )

    def get_input_output_texts( self ,tokenizer ):
        '''simple docstring'''
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text

    @unittest.skip("MGP-STR always lower cases letters." )
    def test_added_tokens_do_lower_case( self ):
        '''simple docstring'''
        pass

    def test_add_special_tokens( self ):
        '''simple docstring'''
        tokenizers = self.get_tokenizers(do_lower_case=False )
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
                special_token = "[SPECIAL_TOKEN]"
                tokenizer.add_special_tokens({"cls_token": special_token} )
                encoded_special_token = tokenizer.encode([special_token] ,add_special_tokens=False )
                self.assertEqual(len(encoded_special_token ) ,1 )
                decoded = tokenizer.decode(encoded_special_token ,skip_special_tokens=True )
                self.assertTrue(special_token not in decoded )

    def test_internal_consistency( self ):
        '''simple docstring'''
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
                input_text, output_text = self.get_input_output_texts(tokenizer )
                tokens = tokenizer.tokenize(input_text )
                ids = tokenizer.convert_tokens_to_ids(tokens )
                ids_a = tokenizer.encode(input_text ,add_special_tokens=False )
                self.assertListEqual(ids ,ids_a )
                tokens_a = tokenizer.convert_ids_to_tokens(ids )
                self.assertNotEqual(len(tokens_a ) ,0 )
                text_a = tokenizer.decode(ids )
                self.assertIsInstance(text_a ,str )
                self.assertEqual(text_a.replace(" " ,"" ) ,output_text )

    @unittest.skip("MGP-STR tokenizer only handles one sequence." )
    def test_maximum_encoding_length_pair_input( self ):
        '''simple docstring'''
        pass

    @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" )
    def test_pretokenized_inputs( self ):
        '''simple docstring'''
        pass
| 46 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    '''configuration_resnet''': ['''RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ResNetConfig''', '''ResNetOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_resnet'''] = [
        '''RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''ResNetForImageClassification''',
        '''ResNetModel''',
        '''ResNetPreTrainedModel''',
        '''ResNetBackbone''',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_resnet'''] = [
        '''TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFResNetForImageClassification''',
        '''TFResNetModel''',
        '''TFResNetPreTrainedModel''',
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_resnet'''] = [
        '''FlaxResNetForImageClassification''',
        '''FlaxResNetModel''',
        '''FlaxResNetPreTrainedModel''',
    ]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
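
# Illustrative: thanks to the _LazyModule registered above, an import such as
# `from transformers.models.resnet import ResNetModel` resolves the name through
# _import_structure and only loads the heavy torch-backed module on first access.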
| 46 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior.to("cuda")
>>> prompt = "A red cartoon frog, 4k"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
... )
>>> pipe.to("cuda")
>>> init_image = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/frog.png"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save("red_frog.png")
```
'''
def downscale_height_and_width( height , width , scale_factor=8 ):
    '''simple docstring'''
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
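

# Worked example: downscale_height_and_width(768, 768) == (96, 96) because 768 is
# a multiple of 8**2 = 64; non-multiples round up, e.g. (700, 700) -> (88, 88).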
def prepare_image( pil_image , w=512 , h=512 ):
    '''simple docstring'''
    pil_image = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
    arr = np.array(pil_image.convert("RGB" ) )
    arr = arr.astype(np.float32 ) / 127.5 - 1
    arr = np.transpose(arr , [2, 0, 1] )
    image = torch.from_numpy(arr ).unsqueeze(0 )
    return image
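

# Illustrative: prepare_image maps a PIL image to a (1, 3, h, w) float tensor,
# rescaling pixel values from [0, 255] to the [-1.0, 1.0] range the VQ encoder expects.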
class KandinskyV22Img2ImgPipeline( DiffusionPipeline ):
    def __init__( self ,unet: UNet2DConditionModel ,scheduler: DDPMScheduler ,movq: VQModel ,):
        '''simple docstring'''
        super().__init__()
        self.register_modules(
            unet=unet ,scheduler=scheduler ,movq=movq ,)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)

    def get_timesteps( self ,num_inference_steps ,strength ,device ):
        '''simple docstring'''
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength ) ,num_inference_steps )
        t_start = max(num_inference_steps - init_timestep ,0 )
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    def prepare_latents( self ,image ,timestep ,batch_size ,num_images_per_prompt ,dtype ,device ,generator=None ):
        '''simple docstring'''
        if not isinstance(image ,(torch.Tensor, PIL.Image.Image, list) ):
            raise ValueError(
                F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image )}""" )
        image = image.to(device=device ,dtype=dtype )
        batch_size = batch_size * num_images_per_prompt
        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator ,list ) and len(generator ) != batch_size:
                raise ValueError(
                    F"""You have passed a list of generators of length {len(generator )}, but requested an effective batch"""
                    F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
            elif isinstance(generator ,list ):
                init_latents = [
                    self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(batch_size )
                ]
                init_latents = torch.cat(init_latents ,dim=0 )
            else:
                init_latents = self.movq.encode(image ).latent_dist.sample(generator )
            init_latents = self.movq.config.scaling_factor * init_latents
        init_latents = torch.cat([init_latents] ,dim=0 )
        shape = init_latents.shape
        noise = randn_tensor(shape ,generator=generator ,device=device ,dtype=dtype )
        # get latents
        init_latents = self.scheduler.add_noise(init_latents ,noise ,timestep )
        latents = init_latents
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        '''Offload all models to CPU, moving each to GPU only while its forward runs.'''
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device(f"cuda:{gpu_id}")
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        '''Offload models to CPU with hooks, so each moves to GPU per forward pass.'''
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
        device = torch.device(f"cuda:{gpu_id}")
        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=False)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        '''Return the device the pipeline's modules actually execute on (accelerate-aware).'''
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        strength: float = 0.3,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        '''Run img2img generation from image embeddings and an init image.'''
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor")
        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)
        latents = self.movq.encode(image)["latents"]
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator)
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False)[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                # classifier-free guidance: extrapolate from the unconditional prediction
                # toward the conditioned one, scaled by `guidance_scale`
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred, t, latents, generator=generator)[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 46 | 1 |
"""simple docstring"""
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'''
    },
    '''merges_file''': {
        '''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'''
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'''allegro/herbert-base-cased''': 514}
PRETRAINED_INIT_CONFIGURATION = {}
class HerbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sep_token="</s>", **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, sep_token=sep_token, **kwargs)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        '''Build model inputs as `<s> A </s>` or `<s> A </s> B </s>`.'''
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False):
        '''Return a mask with 1 marking special tokens and 0 marking sequence tokens.'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        '''Return token type ids: 0 for the first sequence (and its special tokens), 1 for the second.'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        '''Save the underlying tokenizer model files to `save_directory`.'''
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
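# Hedged usage sketch for the tokenizer above (the Hub id hosts the tokenizer files):
# tokenizer = HerbertTokenizerFast.from_pretrained("allegro/herbert-base-cased")
# Single sequences are encoded as `<s> A </s>` with token_type_ids all 0; pairs become
# `<s> A </s> B </s>` with 0s over the first segment and 1s over the second.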
| 46 |
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def main() -> None:
    '''Generate a 1024-bit RSA key pair and write it to disk.'''
    print("Making key files...")
    make_key_files("rsa", 1024)
    print("Key files generation successful.")
def generate_key(key_size: int) -> tuple[tuple[int, int], tuple[int, int]]:
    '''Return ((n, e), (n, d)) for a freshly generated RSA key of the given bit size.'''
    print("Generating prime p...")
    p = rabinMiller.generate_large_prime(key_size)
    print("Generating prime q...")
    q = rabinMiller.generate_large_prime(key_size)
    n = p * q
    print("Generating e that is relatively prime to (p - 1) * (q - 1)...")
    while True:
        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
        if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
            break
    print("Calculating d that is mod inverse of e...")
    d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1))
    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)
def make_key_files(name: str, key_size: int) -> None:
    '''Write {name}_pubkey.txt and {name}_privkey.txt, refusing to overwrite existing files.'''
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program.")
        sys.exit()
    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{public_key[0]},{public_key[1]}")
    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{private_key[0]},{private_key[1]}")
if __name__ == "__main__":
main()
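# Hedged usage note: with the (n, e) and (n, d) pairs written above, textbook RSA on an
# integer message m < n is `ciphertext = pow(m, e, n)` and `m = pow(ciphertext, d, n)`;
# production systems additionally require padding (e.g. OAEP) before encrypting.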
| 46 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_megatron_bert''': ['''MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegatronBertConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_megatron_bert'''] = [
'''MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegatronBertForCausalLM''',
'''MegatronBertForMaskedLM''',
'''MegatronBertForMultipleChoice''',
'''MegatronBertForNextSentencePrediction''',
'''MegatronBertForPreTraining''',
'''MegatronBertForQuestionAnswering''',
'''MegatronBertForSequenceClassification''',
'''MegatronBertForTokenClassification''',
'''MegatronBertModel''',
'''MegatronBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
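# Hedged usage note: with the _LazyModule registered in sys.modules above, a statement
# like `from transformers.models.megatron_bert import MegatronBertModel` defers the
# heavy torch-backed import until the attribute is first accessed.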
| 46 |
"""simple docstring"""
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, mask_ratio=0.6, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (self.num_patches + 1)))
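        # e.g. with image_size=30 and patch_size=2 there are 225 patches, and mask_ratio=0.6
        # keeps ceil(0.4 * 226) == 91 tokens (visible patches plus [CLS]) in the sequence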
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, mask_ratio=self.mask_ratio)
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))
        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="ViTMAE does not use inputs_embeds" )
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
        # make the random mask reproducible across frameworks
        np.random.seed(2)
        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict["noise"] = pt_noise
        super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)
    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)
    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results." )
    def test_determinism(self):
        pass
    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results." )
    def test_save_load_fast_init_from_base(self):
        pass
    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results." )
    def test_save_load_fast_init_to_base(self):
        pass
    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
    def test_model_outputs_equivalence(self):
        pass
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def test_model_is_small(self):
        pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    '''Load the standard COCO test fixture image used across vision tests.'''
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None
    @slow
    def test_inference_for_pretraining(self):
        # make the random mask reproducible
        np.random.seed(2)
        model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))
        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]])
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
| 46 | 1 |
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''facebook/dpr-ctx_encoder-single-nq-base''': 512,
    '''facebook/dpr-ctx_encoder-multiset-base''': 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''facebook/dpr-question_encoder-single-nq-base''': 512,
    '''facebook/dpr-question_encoder-multiset-base''': 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''facebook/dpr-reader-single-nq-base''': 512,
    '''facebook/dpr-reader-multiset-base''': 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    '''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
    '''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    '''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
    '''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    '''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
    '''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class DPRQuestionEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
    '''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text''']
)
DPRReaderOutput = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits'''])
CUSTOM_DPR_READER_DOCSTRING = R'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `\'tf\'`: Return TensorFlow `tf.constant` objects.
- `\'pt\'`: Return PyTorch `torch.Tensor` objects.
- `\'np\'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer\'s default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(self, questions, titles: Optional[str] = None, texts: Optional[str] = None, padding: Union[bool, str] = False, truncation: Union[bool, str] = False, max_length: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_attention_mask: Optional[bool] = None, **kwargs) -> BatchEncoding:
        '''See the class docstring for the full argument description.'''
        if titles is None and texts is None:
            return super().__call__(
                questions, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs)
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions, text_pair, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs)
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts.")
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(self, reader_input: BatchEncoding, reader_output: DPRReaderOutput, num_spans: int = 16, max_answer_length: int = 64, num_spans_per_passage: int = 4):
        '''Rank passages by relevance and extract the best answer spans from each.'''
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=max_answer_length, top_spans=num_spans_per_passage)
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=doc_id, start_index=start_index, end_index=end_index, text=self.decode(sequence_ids[start_index : end_index + 1])))
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(self, start_logits: List[int], end_logits: List[int], max_answer_length: int, top_spans: int):
        '''Find the `top_spans` highest-scoring, non-overlapping answer spans.'''
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
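# Hedged sketch of the span selection above: candidate spans are scored as
# start_logits[i] + end_logits[j] for j in [i, i + max_answer_length), sorted by score,
# and any span overlapping an already accepted span is skipped, so the returned
# top_spans intervals are mutually disjoint.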
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ['input_ids', 'attention_mask']
| 46 |
"""simple docstring"""
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10
def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    '''Linear search of array[left:right + 1]; returns the index or -1.'''
    for i in range(left, right + 1):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search(array: list[int], target: int) -> int:
    '''Iterative ternary search over a sorted array; returns the index or -1.'''
    left = 0
    right = len(array) - 1
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    return -1
def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    '''Recursive ternary search over array[left:right + 1]; returns the index or -1.'''
    if left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
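# Worked example (hypothetical input): for array = [1, 3, 5, 7, 9] and target = 7, both
# searches return index 3, and for target = 4 both return -1; arrays shorter than
# `precision` fall straight through to the linear search.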
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('''Enter numbers separated by comma:\n''').strip()
    collection = [int(item.strip()) for item in user_input.split(''',''')]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input('''Enter the number to be found in the list:\n''').strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(f'''Iterative search: {target} found at position: {result1}''')
        print(f'''Recursive search: {target} found at position: {result2}''')
    else:
        print('''Not found''')
| 46 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
    "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}
class LukeConfig(PretrainedConfig):
    model_type = '''luke'''
    def __init__(self, vocab_size=50267, entity_vocab_size=500000, hidden_size=768, entity_emb_size=256, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_entity_aware_attention=True, classifier_dropout=None, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        '''Construct a LUKE configuration with BERT-style defaults plus entity embeddings.'''
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
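# Hedged usage sketch (upstream class: LukeConfig):
# config = LukeConfig(entity_vocab_size=10_000, use_entity_aware_attention=False)
# overrides two of the defaults above; entity embeddings of size `entity_emb_size`
# are projected up to `hidden_size` inside the model when the two differ.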
| 0 |
"""simple docstring"""
def solution(n: int = 100) -> int:
    '''Project Euler 29: count the distinct terms of a**b for 2 <= a, b <= n.'''
    collect_powers = set()
    n = n + 1  # maximum limit (range's upper bound is exclusive)
    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)
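# Sanity check from the Project Euler 29 statement: for n = 5 there are 15 distinct
# terms of a**b with 2 <= a, b <= 5, so solution(5) == 15.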
if __name__ == "__main__":
print('''Number of terms ''', solution(int(str(input()).strip())))
| 46 | 0 |
def excel_title_to_column(column_title: str) -> int:
    """Convert an Excel-style column title (e.g. "AB") to its 1-based column number."""
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1
    return answer
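# Worked examples: "A" -> 1, "Z" -> 26, "AA" -> 27 and "AZ" -> 52; each letter
# contributes (ord(letter) - 64) * 26**position, counting positions from the right.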
if __name__ == "__main__":
from doctest import testmod
testmod()
| 1 |
"""simple docstring"""
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
    # See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    model_type = '''esm'''
    def __init__(self, vocab_size=None, mask_token_id=None, pad_token_id=None, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1026, initializer_range=0.02, layer_norm_eps=1e-12, position_embedding_type="absolute", use_cache=True, emb_layer_norm_before=None, token_dropout=False, is_folding_model=False, esmfold_config=None, vocab_list=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")
    def to_dict(self):
        '''Serialize this instance to a dict, expanding the nested esmfold_config.'''
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm: bool = False
    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None
    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)
    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None
    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)
        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        if self.sequence_state_dim % self.sequence_state_dim != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got"
                f" {self.sequence_state_dim} and {self.sequence_state_dim}.")
        if self.pairwise_state_dim % self.pairwise_state_dim != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_state_dim}.")
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.")
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.")
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")
        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")
    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5
    def to_dict(self):
        return asdict(self)
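# Hedged usage sketch (illustrative values, not the released ESMFold hyperparameters):
# config = EsmConfig(is_folding_model=True, esmfold_config={"trunk": {"max_recycles": 4}})
# builds the nested EsmFoldConfig/TrunkConfig defaults above, and config.to_dict()
# serializes them back into plain dictionaries.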
def get_default_vocab_list() -> tuple:
    '''Return the default ESM-2 vocabulary as a tuple of tokens.'''
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 46 | 0 |
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
    # Initialising the CNN
    # (Sequential- Building the model layer by layer)
    classifier = models.Sequential()
    # Step 1 - Convolution
    # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
    # (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
    )
    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
    # Step 3 - Flattening
    classifier.add(layers.Flatten())
    # Step 4 - Full connection
    classifier.add(layers.Dense(units=128, activation="relu"))
    classifier.add(layers.Dense(units=1, activation="sigmoid"))
    # Compiling the CNN
    classifier.compile(
        optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
    )
    # Part 2 - Fitting the CNN to the images
    # Load Trained model weights
    # from keras.models import load_model
    # regressor=load_model('cnn.h5')
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )
    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
    training_set = train_datagen.flow_from_directory(
        "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
    test_set = test_datagen.flow_from_directory(
        "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
    classifier.fit_generator(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )
    classifier.save("cnn.h5")
    # Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        "dataset/single_prediction/image.png", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    if result[0][0] == 0:
        prediction = "Normal"
    if result[0][0] == 1:
        prediction = "Abnormality detected"
| 2 |
"""simple docstring"""
import re
def dna(dna: str) -> str:
    '''Return the complementary DNA strand (A<->T, C<->G).'''
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")
    return dna.translate(dna.maketrans("ATCG", "TAGC"))
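# Worked example: dna("ATCG") -> "TAGC", mapping each base to its complement
# (A <-> T, C <-> G); any character outside "ATCG" raises ValueError.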
if __name__ == "__main__":
import doctest
doctest.testmod()
| 46 | 0 |
'''simple docstring'''
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
    def __init__(self, parent, vocab_size=99, batch_size=13, d_model=16, decoder_seq_length=7, is_training=True, is_decoder=True, use_attention_mask=True, use_cache=False, use_labels=True, decoder_start_token_id=2, decoder_ffn_dim=32, decoder_layers=4, decoder_attention_heads=4, max_position_embeddings=30, pad_token_id=0, bos_token_id=1, eos_token_id=2, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.decoder_layers = decoder_layers
        self.num_hidden_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings
        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)
        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        config = TrOCRConfig(vocab_size=self.vocab_size, d_model=self.d_model, decoder_layers=self.decoder_layers, decoder_ffn_dim=self.decoder_ffn_dim, decoder_attention_heads=self.decoder_attention_heads, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, use_cache=self.use_cache, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, max_position_embeddings=self.max_position_embeddings)
        return (config, input_ids, attention_mask, lm_labels)

    def create_and_check_decoder_model_past(self, config, input_ids, attention_mask, lm_labels):
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]
        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)
        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
        past_key_values = outputs["past_key_values"]
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    # not implemented currently
    def test_inputs_embeds(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_from_base(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_to_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    # decoder cannot keep gradients
    def test_retain_grad_hidden_states_attentions(self):
        return

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
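# The cache test above follows the standard pattern for autoregressive
# decoders: run once over the full sequence, run once incrementally with
# past_key_values, then compare a random slice of the hidden states. A
# condensed sketch of the invariant being asserted (names illustrative):
#
#   full = model(input_ids)["last_hidden_state"][:, -1, :]
#   step = model(last_token, past_key_values=past)["last_hidden_state"][:, 0, :]
#   assert torch.allclose(full, step, atol=1e-3)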
| 3 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : str = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[config.hidden_size : config.hidden_size * 2, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[config.hidden_size : config.hidden_size * 2]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    """
    Copy/paste/tweak the original DINO weights into our ViT structure.
    """
    # define default ViT configuration
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''dino_vitb16''',
type=str,
help='''Name of the model trained with DINO you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--base_model''',
action='''store_true''',
help='''Whether to only convert the base model (no projection head weights).''',
)
parser.set_defaults(base_model=True)
    args = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
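# Example invocation of this conversion script (the script file name and
# output directory are illustrative):
#
#   python convert_dino_to_vit.py --model_name dino_vitb16 \
#       --pytorch_dump_folder_path ./dino_vitb16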
| 46 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Optional[int] = logging.get_logger(__name__)
__UpperCamelCase : Dict = {
'''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''',
}
class GPTBigCodeConfig(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
'''hidden_size''': '''n_embd''',
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
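# Minimal usage sketch for the config defined above (values shown are the
# defaults, so this is equivalent to GPTBigCodeConfig()):
#
#   config = GPTBigCodeConfig(vocab_size=50257, n_positions=1024, n_embd=768)
#   config.hidden_size  # -> 768, resolved through the attribute_map alias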
| 4 |
"""simple docstring"""
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


class ClassificationFunction(ExplicitEnum):
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        return_all_scores (`bool`, *optional*, defaults to `False`):
            Whether to return all prediction scores or just the one of the predicted class.
        function_to_apply (`str`, *optional*, defaults to `"default"`):
            The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:

            - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model
              has several labels, will apply the softmax function on the output.
            - `"sigmoid"`: Applies the sigmoid function on the output.
            - `"softmax"`: Applies the softmax function on the output.
            - `"none"`: Does not apply any function on the output.
    """,
)
class TextClassificationPipeline(Pipeline):
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
        )
    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        preprocess_params = tokenizer_kwargs

        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores

        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
                UserWarning,
            )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1

        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]

        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params
    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result
    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs
            )
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.'
            )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)
    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE

        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()

        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}")

        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}

        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
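# Everyday usage of the pipeline implemented above, through the standard
# transformers entry point (the default model is used here for brevity):
#
#   from transformers import pipeline
#
#   classifier = pipeline("text-classification")
#   classifier("This is great!")               # [{"label": ..., "score": ...}]
#   classifier("This is great!", top_k=None)   # all labels, sorted by score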
| 46 | 0 |
'''simple docstring'''
import os
import sys
import unittest
_lowercase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
_lowercase = os.path.join("""tests""", """models""", """bert""", """test_modeling_bert.py""")
_lowercase = os.path.join("""tests""", """models""", """blip""", """test_modeling_blip.py""")
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)
        expected_bert_mapping = {"BertModelTest": "BertModelTester"}
        expected_blip_mapping = {
"""BlipModelTest""": """BlipModelTester""",
"""BlipTextImageModelTest""": """BlipTextImageModelsModelTester""",
"""BlipTextModelTest""": """BlipTextModelTester""",
"""BlipTextRetrievalModelTest""": """BlipTextRetrievalModelTester""",
"""BlipVQAModelTest""": """BlipVQAModelTester""",
"""BlipVisionModelTest""": """BlipVisionModelTester""",
}
        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), expected_bert_mapping)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), expected_blip_mapping)

    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)
        expected_bert_mapping = {
"""BertForMaskedLM""": ["""BertModelTest"""],
"""BertForMultipleChoice""": ["""BertModelTest"""],
"""BertForNextSentencePrediction""": ["""BertModelTest"""],
"""BertForPreTraining""": ["""BertModelTest"""],
"""BertForQuestionAnswering""": ["""BertModelTest"""],
"""BertForSequenceClassification""": ["""BertModelTest"""],
"""BertForTokenClassification""": ["""BertModelTest"""],
"""BertLMHeadModel""": ["""BertModelTest"""],
"""BertModel""": ["""BertModelTest"""],
}
        expected_blip_mapping = {
"""BlipForConditionalGeneration""": ["""BlipTextImageModelTest"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTest"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTest"""],
"""BlipModel""": ["""BlipModelTest"""],
"""BlipTextModel""": ["""BlipTextModelTest"""],
"""BlipVisionModel""": ["""BlipVisionModelTest"""],
}
        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), expected_bert_mapping)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), expected_blip_mapping)

    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)
        expected_bert_mapping = {
"""BertForMaskedLM""": ["""BertModelTester"""],
"""BertForMultipleChoice""": ["""BertModelTester"""],
"""BertForNextSentencePrediction""": ["""BertModelTester"""],
"""BertForPreTraining""": ["""BertModelTester"""],
"""BertForQuestionAnswering""": ["""BertModelTester"""],
"""BertForSequenceClassification""": ["""BertModelTester"""],
"""BertForTokenClassification""": ["""BertModelTester"""],
"""BertLMHeadModel""": ["""BertModelTester"""],
"""BertModel""": ["""BertModelTester"""],
}
        expected_blip_mapping = {
"""BlipForConditionalGeneration""": ["""BlipTextImageModelsModelTester"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTester"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTester"""],
"""BlipModel""": ["""BlipModelTester"""],
"""BlipTextModel""": ["""BlipTextModelTester"""],
"""BlipVisionModel""": ["""BlipVisionModelTester"""],
}
        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), expected_bert_mapping)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), expected_blip_mapping)
| 5 |
"""simple docstring"""
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = """\
Text data.
Second line of data."""

FILE_PATH = "file"


@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH
@pytest.mark.parametrize("compression_format" , ["gzip", "xz", "zstd"] )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int:
'''simple docstring'''
_lowerCamelCase : Tuple = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
_lowerCamelCase : Tuple = input_paths[compression_format]
_lowerCamelCase : int = tmp_path / "cache"
_lowerCamelCase : Any = DownloadConfig(cache_dir=_lowerCamelCase , extract_compressed_file=_lowerCamelCase )
_lowerCamelCase : Optional[Any] = cached_path(_lowerCamelCase , download_config=_lowerCamelCase )
with open(_lowerCamelCase ) as f:
_lowerCamelCase : List[Any] = f.read()
with open(_lowerCamelCase ) as f:
_lowerCamelCase : int = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted" , [True, False] )
@pytest.mark.parametrize("default_cache_dir" , [True, False] )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = "custom_cache"
_lowerCamelCase : List[str] = "custom_extracted_dir"
_lowerCamelCase : str = tmp_path / "custom_extracted_path"
if default_extracted:
_lowerCamelCase : Dict = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
else:
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR" , _lowerCamelCase )
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" , str(_lowerCamelCase ) )
_lowerCamelCase : int = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
_lowerCamelCase : int = xz_file
_lowerCamelCase : List[Any] = (
DownloadConfig(extract_compressed_file=_lowerCamelCase )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=_lowerCamelCase )
)
_lowerCamelCase : Dict = cached_path(_lowerCamelCase , download_config=_lowerCamelCase )
assert Path(_lowerCamelCase ).parent.parts[-2:] == expected
def test_cached_path_local(text_file):
    # absolute path
    text_file = str(Path(text_file).resolve())
    assert cached_path(text_file) == text_file
    # relative path
    text_file = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file) == text_file


def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)


def test_get_from_cache_fsspec(tmpfs_file):
    output_file = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_file) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE" , _lowerCamelCase )
def lowerCamelCase_( ) -> int:
'''simple docstring'''
with pytest.raises(_lowerCamelCase ):
cached_path("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(_lowerCamelCase ):
http_get("https://huggingface.co" , temp_file=_lowerCamelCase )
with pytest.raises(_lowerCamelCase ):
http_head("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase ) -> str:
'''simple docstring'''
_lowerCamelCase : Any = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(_lowerCamelCase ):
ftp_get("ftp://huggingface.co" , temp_file=_lowerCamelCase )
with pytest.raises(_lowerCamelCase ):
ftp_head("ftp://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase ) -> List[Any]:
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(_lowerCamelCase ):
fsspec_get("s3://huggingface.co" , temp_file=_lowerCamelCase )
with pytest.raises(_lowerCamelCase ):
fsspec_head("s3://huggingface.co" )
| 46 | 0 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase = logging.get_logger(__name__)
PATTERNS = [
['attention', 'attn'],
['encoder_attention', 'encoder_attn'],
['q_lin', 'q_proj'],
['k_lin', 'k_proj'],
['v_lin', 'v_proj'],
['out_lin', 'out_proj'],
['norm_embeddings', 'layernorm_embedding'],
['position_embeddings', 'embed_positions'],
['embeddings', 'embed_tokens'],
['ffn.lin', 'fc'],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k


def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--src_path', type=str, help='like blenderbot-model.bin')
parser.add_argument('--save_dir', default='hf_blenderbot', type=str, help='Where to save converted model.')
parser.add_argument(
'--hf_config_json', default='blenderbot-3b-config.json', type=str, help='Path to config to use'
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
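# Example invocation, matching the argparse flags defined above (the script
# file name is illustrative):
#
#   python convert_blenderbot_checkpoint.py --src_path blenderbot-model.bin \
#       --save_dir hf_blenderbot --hf_config_json blenderbot-3b-config.json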
| 6 |
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase = "cpu" , _lowerCamelCase = None ) -> None:
'''simple docstring'''
_lowerCamelCase : Any = torch.load(_lowerCamelCase , map_location=_lowerCamelCase )
for k, v in tqdm(state_dict.items() ):
if not isinstance(_lowerCamelCase , torch.Tensor ):
raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin" )
_lowerCamelCase : List[str] = v.half()
if save_path is None: # overwrite src_path
_lowerCamelCase : Union[str, Any] = src_path
torch.save(_lowerCamelCase , _lowerCamelCase )
if __name__ == "__main__":
fire.Fire(convert)
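# fire.Fire(convert) exposes the function signature as a CLI, so a typical
# call looks like (file names are illustrative):
#
#   python convert_to_fp16.py pytorch_model.bin --save_path pytorch_model_fp16.bin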
| 46 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_poolformer''': [
'''POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''PoolFormerConfig''',
'''PoolFormerOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
'''POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PoolFormerForImageClassification''',
'''PoolFormerModel''',
'''PoolFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
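# The _LazyModule pattern above defers the heavy torch/vision imports until
# an attribute is first accessed, so importing the package stays cheap. A
# hedged usage sketch (standard transformers import path):
#
#   from transformers import PoolFormerConfig  # resolved lazily on access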
| 7 |
"""simple docstring"""
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
SAMPLE_ROBERTA_CONFIG = get_tests_dir("fixtures/dummy-config.json")


class AutoConfigTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_module_spec(self):
        self.assertIsNotNone(transformers.models.auto.__spec__)
        self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto"))

    def test_config_from_model_shortcut(self):
        config = AutoConfig.from_pretrained("bert-base-uncased")
        self.assertIsInstance(config, BertConfig)

    def test_config_model_type_from_local_file(self):
        config = AutoConfig.from_pretrained(SAMPLE_ROBERTA_CONFIG)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_model_type_from_model_identifier(self):
        config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_for_model_str(self):
        config = AutoConfig.for_model("roberta")
        self.assertIsInstance(config, RobertaConfig)
    def test_pattern_matching_fallback(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            # This model name contains bert and roberta, but roberta ends up being picked.
            folder = os.path.join(tmp_dir, "fake-roberta")
            os.makedirs(folder, exist_ok=True)
            with open(os.path.join(folder, "config.json"), "w") as f:
                f.write(json.dumps({}))
            config = AutoConfig.from_pretrained(folder)
            self.assertEqual(type(config), RobertaConfig)

    def test_new_config_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            # Wrong model type will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("model", CustomConfig)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("bert", BertConfig)

            # Now that the config is registered, it can be used as any other config with the auto-API
            config = CustomConfig()
            with tempfile.TemporaryDirectory() as tmp_dir:
                config.save_pretrained(tmp_dir)
                new_config = AutoConfig.from_pretrained(tmp_dir)
                self.assertIsInstance(new_config, CustomConfig)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            config = AutoConfig.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_configuration_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.",
        ):
            config = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo")
    def test_from_pretrained_dynamic_config(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)

        config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
        self.assertEqual(config.__class__.__name__, "NewModelConfig")

        # Test config can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            reloaded_config = AutoConfig.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_config.__class__.__name__, "NewModelConfig")

    def test_from_pretrained_dynamic_config_conflict(self):
        class NewModelConfigLocal(BertConfig):
            model_type = "new-model"

        try:
            AutoConfig.register("new-model", NewModelConfigLocal)
            # If remote code is not set, the default is to use local
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote code is disabled, we load the local one.
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote is enabled, we load from the Hub
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
            self.assertEqual(config.__class__.__name__, "NewModelConfig")
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
| 46 | 0 |
'''simple docstring'''
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"

stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class TestTheRest(TestCasePlus):
    def run_eval_tester(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        articles = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
        _dump_articles(input_file_name, articles)
        score_path = str(Path(self.get_auto_remove_tmp_dir()) / "scores.json")
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {input_file_name}
            {output_file_name}
            --score_path {score_path}
            --task {task}
            --num_beams 2
            --length_penalty 2.0
            """.split()
        with patch.object(sys, "argv", testargs):
            run_generate()
        assert Path(output_file_name).exists()
        # os.remove(Path(output_file_name))

    # test one model to quickly (no @slow) catch simple problems
    def test_run_eval(self):
        self.run_eval_tester(T5_TINY)

    # any extra models should go into the list here - can be slow
    @parameterized.expand([BART_TINY, MBART_TINY])
    @slow
    def test_run_eval_slow(self, model):
        self.run_eval_tester(model)
    @parameterized.expand([T5_TINY, MBART_TINY])
    @slow
    def test_run_eval_search(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        text = {
            "en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
            "de": [
                "Maschinelles Lernen ist großartig, oder?",
                "Ich esse gerne Bananen",
                "Morgen ist wieder ein toller Tag!",
            ],
        }
        tmp_dir = Path(self.get_auto_remove_tmp_dir())
        score_path = str(tmp_dir / "scores.json")
        reference_path = str(tmp_dir / "val.target")
        _dump_articles(input_file_name, text["en"])
        _dump_articles(reference_path, text["de"])
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {str(input_file_name)}
            {str(output_file_name)}
            --score_path {score_path}
            --reference_path {reference_path}
            --task {task}
            """.split()
        testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"])
        with patch.object(sys, "argv", testargs):
            with CaptureStdout() as cs:
                run_search()
        expected_strings = [" num_beams | length_penalty", model, "Best score args"]
        un_expected_strings = ["Info"]
        if "translation" in task:
            expected_strings.append("bleu")
        else:
            expected_strings.extend(ROUGE_KEYS)
        for w in expected_strings:
            assert w in cs.out
        for w in un_expected_strings:
            assert w not in cs.out
        assert Path(output_file_name).exists()
        os.remove(Path(output_file_name))
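# Shape of the CLI these tests drive, reconstructed from the testargs built
# above (paths and the model name are illustrative):
#
#   python run_eval_search.py sshleifer/bart-tiny-random input.source output.txt \
#       --score_path scores.json --task summarization --num_beams 2 --length_penalty 2.0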
| 8 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 46 | 0 |
from ..utils import DummyObject, requires_backends
class __lowerCAmelCase ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ["flax"]
def __init__( self : Union[str, Any] , *_snake_case : Dict , **_snake_case : Optional[int] ):
"""simple docstring"""
requires_backends(self , ['flax'] )
@classmethod
def _a ( cls : Dict , *_snake_case : str , **_snake_case : List[str] ):
"""simple docstring"""
requires_backends(cls , ['flax'] )
@classmethod
def _a ( cls : Union[str, Any] , *_snake_case : List[str] , **_snake_case : int ):
"""simple docstring"""
requires_backends(cls , ['flax'] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ["flax"]
def __init__( self : str , *_snake_case : Tuple , **_snake_case : Dict ):
"""simple docstring"""
requires_backends(self , ['flax'] )
@classmethod
def _a ( cls : List[str] , *_snake_case : Optional[int] , **_snake_case : int ):
"""simple docstring"""
requires_backends(cls , ['flax'] )
@classmethod
def _a ( cls : List[Any] , *_snake_case : List[Any] , **_snake_case : str ):
"""simple docstring"""
requires_backends(cls , ['flax'] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ["flax"]
def __init__( self : Any , *_snake_case : List[str] , **_snake_case : Any ):
"""simple docstring"""
requires_backends(self , ['flax'] )
@classmethod
def _a ( cls : int , *_snake_case : Tuple , **_snake_case : Optional[Any] ):
"""simple docstring"""
requires_backends(cls , ['flax'] )
@classmethod
def _a ( cls : Optional[int] , *_snake_case : str , **_snake_case : int ):
"""simple docstring"""
requires_backends(cls , ['flax'] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ["flax"]
def __init__( self : Optional[int] , *_snake_case : str , **_snake_case : List[str] ):
"""simple docstring"""
requires_backends(self , ['flax'] )
@classmethod
def _a ( cls : Dict , *_snake_case : List[Any] , **_snake_case : Optional[int] ):
"""simple docstring"""
requires_backends(cls , ['flax'] )
@classmethod
def _a ( cls : Optional[Any] , *_snake_case : int , **_snake_case : Tuple ):
"""simple docstring"""
requires_backends(cls , ['flax'] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ["flax"]
def __init__( self : List[str] , *_snake_case : int , **_snake_case : Any ):
"""simple docstring"""
requires_backends(self , ['flax'] )
@classmethod
def _a ( cls : Union[str, Any] , *_snake_case : List[str] , **_snake_case : List[Any] ):
"""simple docstring"""
requires_backends(cls , ['flax'] )
@classmethod
def _a ( cls : int , *_snake_case : Union[str, Any] , **_snake_case : Any ):
"""simple docstring"""
requires_backends(cls , ['flax'] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ["flax"]
def __init__( self : Optional[int] , *_snake_case : List[str] , **_snake_case : Optional[Any] ):
"""simple docstring"""
requires_backends(self , ['flax'] )
@classmethod
def _a ( cls : str , *_snake_case : List[str] , **_snake_case : Union[str, Any] ):
"""simple docstring"""
requires_backends(cls , ['flax'] )
@classmethod
def _a ( cls : Optional[Any] , *_snake_case : List[str] , **_snake_case : Tuple ):
"""simple docstring"""
requires_backends(cls , ['flax'] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ["flax"]
def __init__( self : List[str] , *_snake_case : int , **_snake_case : List[Any] ):
"""simple docstring"""
requires_backends(self , ['flax'] )
@classmethod
def _a ( cls : Dict , *_snake_case : Any , **_snake_case : int ):
"""simple docstring"""
requires_backends(cls , ['flax'] )
@classmethod
def _a ( cls : Optional[Any] , *_snake_case : Tuple , **_snake_case : Optional[Any] ):
"""simple docstring"""
requires_backends(cls , ['flax'] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ["flax"]
def __init__( self : Dict , *_snake_case : int , **_snake_case : Dict ):
"""simple docstring"""
requires_backends(self , ['flax'] )
@classmethod
def _a ( cls : Dict , *_snake_case : Optional[Any] , **_snake_case : List[Any] ):
"""simple docstring"""
requires_backends(cls , ['flax'] )
@classmethod
def _a ( cls : int , *_snake_case : List[str] , **_snake_case : Union[str, Any] ):
"""simple docstring"""
requires_backends(cls , ['flax'] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ["flax"]
def __init__( self : Tuple , *_snake_case : str , **_snake_case : Union[str, Any] ):
"""simple docstring"""
requires_backends(self , ['flax'] )
@classmethod
def _a ( cls : List[Any] , *_snake_case : Dict , **_snake_case : Any ):
"""simple docstring"""
requires_backends(cls , ['flax'] )
@classmethod
def _a ( cls : Optional[int] , *_snake_case : List[str] , **_snake_case : List[Any] ):
"""simple docstring"""
requires_backends(cls , ['flax'] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ["flax"]
def __init__( self : List[Any] , *_snake_case : List[Any] , **_snake_case : Optional[int] ):
"""simple docstring"""
requires_backends(self , ['flax'] )
@classmethod
def _a ( cls : Any , *_snake_case : Any , **_snake_case : List[str] ):
"""simple docstring"""
requires_backends(cls , ['flax'] )
@classmethod
def _a ( cls : Optional[int] , *_snake_case : Optional[Any] , **_snake_case : List[Any] ):
"""simple docstring"""
requires_backends(cls , ['flax'] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ["flax"]
def __init__( self : Dict , *_snake_case : Dict , **_snake_case : int ):
"""simple docstring"""
requires_backends(self , ['flax'] )
@classmethod
def _a ( cls : Tuple , *_snake_case : str , **_snake_case : Any ):
"""simple docstring"""
requires_backends(cls , ['flax'] )
@classmethod
def _a ( cls : str , *_snake_case : Union[str, Any] , **_snake_case : List[str] ):
"""simple docstring"""
requires_backends(cls , ['flax'] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ["flax"]
def __init__( self : Any , *_snake_case : Optional[int] , **_snake_case : List[Any] ):
"""simple docstring"""
requires_backends(self , ['flax'] )
@classmethod
def _a ( cls : int , *_snake_case : Any , **_snake_case : Tuple ):
"""simple docstring"""
requires_backends(cls , ['flax'] )
@classmethod
def _a ( cls : Optional[int] , *_snake_case : Optional[Any] , **_snake_case : Union[str, Any] ):
"""simple docstring"""
requires_backends(cls , ['flax'] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ["flax"]
def __init__( self : Union[str, Any] , *_snake_case : Tuple , **_snake_case : Union[str, Any] ):
"""simple docstring"""
requires_backends(self , ['flax'] )
@classmethod
def _a ( cls : int , *_snake_case : str , **_snake_case : Dict ):
"""simple docstring"""
requires_backends(cls , ['flax'] )
@classmethod
def _a ( cls : Tuple , *_snake_case : int , **_snake_case : Optional[int] ):
"""simple docstring"""
requires_backends(cls , ['flax'] )
| 9 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : Tuple = logging.get_logger(__name__)
def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""transformer.blocks.{i}.norm1.weight""", F"""vilt.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm1.bias""", F"""vilt.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.weight""", F"""vilt.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.bias""", F"""vilt.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.weight""", F"""vilt.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.bias""", F"""vilt.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.mlp.fc1.weight""", F"""vilt.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc1.bias""", F"""vilt.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.weight""", F"""vilt.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.bias""", F"""vilt.encoder.layer.{i}.output.dense.bias""") )
# embeddings
rename_keys.extend(
[
# text embeddings
("text_embeddings.word_embeddings.weight", "vilt.embeddings.text_embeddings.word_embeddings.weight"),
(
"text_embeddings.position_embeddings.weight",
"vilt.embeddings.text_embeddings.position_embeddings.weight",
),
("text_embeddings.position_ids", "vilt.embeddings.text_embeddings.position_ids"),
(
"text_embeddings.token_type_embeddings.weight",
"vilt.embeddings.text_embeddings.token_type_embeddings.weight",
),
("text_embeddings.LayerNorm.weight", "vilt.embeddings.text_embeddings.LayerNorm.weight"),
("text_embeddings.LayerNorm.bias", "vilt.embeddings.text_embeddings.LayerNorm.bias"),
# patch embeddings
("transformer.cls_token", "vilt.embeddings.cls_token"),
("transformer.patch_embed.proj.weight", "vilt.embeddings.patch_embeddings.projection.weight"),
("transformer.patch_embed.proj.bias", "vilt.embeddings.patch_embeddings.projection.bias"),
("transformer.pos_embed", "vilt.embeddings.position_embeddings"),
# token type embeddings
("token_type_embeddings.weight", "vilt.embeddings.token_type_embeddings.weight"),
] )
# final layernorm + pooler
rename_keys.extend(
[
("transformer.norm.weight", "vilt.layernorm.weight"),
("transformer.norm.bias", "vilt.layernorm.bias"),
("pooler.dense.weight", "vilt.pooler.dense.weight"),
("pooler.dense.bias", "vilt.pooler.dense.bias"),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
("vqa_classifier.0.weight", "classifier.0.weight"),
("vqa_classifier.0.bias", "classifier.0.bias"),
("vqa_classifier.1.weight", "classifier.1.weight"),
("vqa_classifier.1.bias", "classifier.1.bias"),
("vqa_classifier.3.weight", "classifier.3.weight"),
("vqa_classifier.3.bias", "classifier.3.bias"),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
("nlvr2_classifier.0.weight", "classifier.0.weight"),
("nlvr2_classifier.0.bias", "classifier.0.bias"),
("nlvr2_classifier.1.weight", "classifier.1.weight"),
("nlvr2_classifier.1.bias", "classifier.1.bias"),
("nlvr2_classifier.3.weight", "classifier.3.weight"),
("nlvr2_classifier.3.bias", "classifier.3.bias"),
] )
else:
pass
return rename_keys
def read_in_q_k_v(state_dict, config):
    """Split each fused timm-style qkv projection into separate query/key/value entries."""
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
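# The helper below is an added, hedged sketch of the slicing performed in
# read_in_q_k_v, using dummy tensors so it can run standalone; the name
# _demo_qkv_split and the toy hidden size are illustrative, not from ViLT.
def _demo_qkv_split(hidden_size=4):
    """Show how one fused (3*hidden, hidden) qkv matrix is cut into q/k/v."""
    qkv = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(3 * hidden_size, hidden_size)
    query = qkv[:hidden_size, :]
    key = qkv[hidden_size : hidden_size * 2, :]
    value = qkv[-hidden_size:, :]
    assert query.shape == key.shape == value.shape == (hidden_size, hidden_size)
    return query, key, value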
def remove_classification_head_(state_dict):
    """Drop the original classification head, which has no counterpart in the HF model."""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original ViLT weights into our ViLT structure.
    """
    config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = "huggingface/label-files"
        filename = "vqa2-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config)
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: "False", 1: "True"}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config)
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config)
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config)
    else:
        raise ValueError("Unknown model type")

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)
    if mlm_model or irtr_model:
        ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
        for k in ignore_keys:
            state_dict.pop(k, None)

    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict)

    # define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    processor = ViltProcessor(image_processor, tokenizer)

    # forward pass on example inputs (image + text)
    if nlvr_model:
        # note: the example compares the same image against itself
        image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_1 = processor(image1, text, return_tensors="pt")
        encoding_2 = processor(image2, text, return_tensors="pt")
        outputs = model(
            input_ids=encoding_1.input_ids,
            pixel_values=encoding_1.pixel_values,
            pixel_values_2=encoding_2.pixel_values,
        )
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image, text, return_tensors="pt")
        outputs = model(**encoding)

    # verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)

        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        # logits are 2-dimensional here, so only the [0, :3] slice is checked
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)

        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :2], expected_slice, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model and processor to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
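# Added invocation sketch (assumes this script is saved as
# convert_vilt_original_to_pytorch.py; the checkpoint URL is the default above):
#   python convert_vilt_original_to_pytorch.py \
#       --checkpoint_url https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt \
#       --pytorch_dump_folder_path ./vilt-b32-mlm-itm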
| 46 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_efficientformer": [
"EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientFormerConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientformer"] = [
        "EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientFormerForImageClassification",
        "EfficientFormerForImageClassificationWithTeacher",
        "EfficientFormerModel",
        "EfficientFormerPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_efficientformer"] = [
        "TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFEfficientFormerForImageClassification",
        "TFEfficientFormerForImageClassificationWithTeacher",
        "TFEfficientFormerModel",
        "TFEfficientFormerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
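# Added usage note (a sketch, not from this file): with the _LazyModule
# registration above, `from transformers.models.efficientformer import
# EfficientFormerConfig` resolves immediately, while the torch/TF modeling
# submodules listed in _import_structure are only imported on first access.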
| 10 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """
    Merge two binary strings that differ in at most one position, replacing the
    differing bit with '_'; return False if they differ in more than one.

    >>> compare_string('0010', '0110')
    '0_10'
    >>> compare_string('0110', '1101')
    False
    """
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)
def check(binary: list[str]) -> list[str]:
    """Repeatedly merge pairs of implicants until no more merges are possible;
    the terms that never merged are the prime implicants."""
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is not False:
                    # both terms were combined into k, so neither is prime
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append(k)
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))
def decimal_to_binary(no_of_variable: int, minterms: Sequence[int]) -> list[str]:
    """Convert each minterm to a fixed-width binary string."""
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp
def is_for_table(string1: str, string2: str, count: int) -> bool:
    """Return True if the two strings differ in exactly `count` positions."""
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count
def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    """Pick essential prime implicants first, then greedily cover the rest."""
    temp = []
    select = [0] * len(chart)
    # a minterm covered by exactly one implicant makes that implicant essential
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # greedily take the implicant covering the most remaining minterms
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0
def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    """Build the coverage chart: chart[i][j] == 1 iff implicant i covers minterm j."""
    chart = [[0 for _ in range(len(binary))] for _ in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart
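# Added worked example (hand-checked, not from the original file): for 2
# variables and minterms [0, 1], decimal_to_binary gives ['00', '01'],
# check() merges them into the single prime implicant '0_', and selection()
# returns ['0_'] since it alone covers both minterms.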
def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        int(x)  # minterms are integers; parsing them as floats breaks the binary conversion
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 46 | 0 |
"""Slowsort: a deliberately inefficient 'multiply and surrender' sorting algorithm."""
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort `sequence` in place between indices `start` and `end` (inclusive)."""
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2

    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
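    # Added usage sketch: slowsort mutates the list in place.
    demo = [5, 2, 4, 1, 3]
    slowsort(demo)
    print(demo)  # [1, 2, 3, 4, 5]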
| 11 |
"""simple docstring"""
from __future__ import annotations
from random import random
class Node:
    def __init__(self, value: int | None = None):
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat(
                {f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1
            )

    def __str__(self) -> str:
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right
def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    """Split the treap into (keys <= value, keys > value)."""
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right
def merge(left: Node | None, right: Node | None) -> Node | None:
    """Merge two treaps where every key in `left` is <= every key in `right`."""
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right
def insert(root: Node | None, value: int) -> Node | None:
    """Insert `value` by splitting around it and merging a fresh node in between."""
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)
def erase(root: Node | None, value: int) -> Node | None:
    """Erase all nodes equal to `value`."""
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)
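# Added note (an illustration, not from the original file): erase splits the
# treap into keys < value, keys == value, and keys > value, then merges the
# outer two parts so the middle subtree is dropped, e.g.
#     root = None
#     for v in (3, 1, 3, 5):
#         root = insert(root, v)
#     root = erase(root, 3)   # both 3s are gone; inorder(root) prints 1,5,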
def inorder(root: Node | None) -> None:
    """Print the treap's values in sorted (in-order) order."""
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)


def interact_treap(root: Node | None, args: str) -> Node | None:
    """Apply a whitespace-separated list of '+value' / '-value' commands."""
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root
def main() -> None:
    """Interactive loop: read commands until 'q'."""
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print("good bye!")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 46 | 0 |
def matching_min_vertex_cover(graph: dict) -> set:
    """Approximate a minimum vertex cover via a maximal matching."""
    chosen_vertices = set()
    # edges = set of the graph's edges
    edges = get_edges(graph)

    # While there are still elements in the edges set, take an arbitrary edge
    # (from_node, to_node), add both of its endpoints to chosen_vertices, and
    # then remove all edges adjacent to from_node or to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices
def get_edges(graph: dict) -> set:
    """Return the set of (from_node, to_node) pairs in the adjacency dict."""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 12 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")
@require_sentencepiece
@require_tokenizers
class SpeechTaTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechTaTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = SpeechTaTokenizer(SAMPLE_VOCAB)

        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

        tokenizer.save_pretrained(self.tmpdirname)
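    # Added note: AddedToken("<mask>", lstrip=True, rstrip=False) makes the mask
    # token absorb whitespace on its left only, so "x <mask>" and "x<mask>"
    # tokenize identically while text to the right of the token is untouched.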
    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,"<s>" )
self.assertEqual(vocab_keys[1] ,"<pad>" )
self.assertEqual(vocab_keys[-4] ,"œ" )
self.assertEqual(vocab_keys[-2] ,"<mask>" )
self.assertEqual(vocab_keys[-1] ,"<ctc_blank>" )
self.assertEqual(len(__lowerCAmelCase ) ,81 )
def _lowercase ( self: Dict ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size ,79 )
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = self.get_tokenizers(do_lower_case=__lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_lowerCamelCase : Tuple = tokenizer.vocab_size
_lowerCamelCase : Optional[Any] = len(__lowerCAmelCase )
self.assertNotEqual(__lowerCAmelCase ,0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
_lowerCamelCase : Optional[int] = ["aaaaa bbbbbb", "cccccccccdddddddd"]
_lowerCamelCase : Any = tokenizer.add_tokens(__lowerCAmelCase )
_lowerCamelCase : Tuple = tokenizer.vocab_size
_lowerCamelCase : Union[str, Any] = len(__lowerCAmelCase )
self.assertNotEqual(__lowerCAmelCase ,0 )
self.assertEqual(__lowerCAmelCase ,__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase ,len(__lowerCAmelCase ) )
self.assertEqual(__lowerCAmelCase ,all_size + len(__lowerCAmelCase ) )
_lowerCamelCase : Any = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l" ,add_special_tokens=__lowerCAmelCase )
self.assertGreaterEqual(len(__lowerCAmelCase ) ,4 )
self.assertGreater(tokens[0] ,tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] ,tokenizer.vocab_size - 1 )
_lowerCamelCase : List[Any] = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
_lowerCamelCase : str = tokenizer.add_special_tokens(__lowerCAmelCase )
_lowerCamelCase : int = tokenizer.vocab_size
_lowerCamelCase : str = len(__lowerCAmelCase )
self.assertNotEqual(__lowerCAmelCase ,0 )
self.assertEqual(__lowerCAmelCase ,__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase ,len(__lowerCAmelCase ) )
self.assertEqual(__lowerCAmelCase ,all_size_a + len(__lowerCAmelCase ) )
_lowerCamelCase : Optional[int] = tokenizer.encode(
">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l" ,add_special_tokens=__lowerCAmelCase )
self.assertGreaterEqual(len(__lowerCAmelCase ) ,6 )
self.assertGreater(tokens[0] ,tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] ,tokens[1] )
self.assertGreater(tokens[-3] ,tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] ,tokens[-4] )
self.assertEqual(tokens[0] ,tokenizer.eos_token_id )
self.assertEqual(tokens[-3] ,tokenizer.pad_token_id )
def _lowercase ( self: Any ):
'''simple docstring'''
pass
def _lowercase ( self: Tuple ):
'''simple docstring'''
pass
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : Tuple = self.get_tokenizer()
_lowerCamelCase : Optional[int] = tokenizer.tokenize("This is a test" )
# fmt: off
self.assertListEqual(__lowerCAmelCase ,[SPIECE_UNDERLINE, "T", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "a", SPIECE_UNDERLINE, "t", "e", "s", "t"] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) ,[4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] ,)
_lowerCamelCase : int = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__lowerCAmelCase ,[SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "92000", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] )
_lowerCamelCase : List[str] = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
# fmt: off
self.assertListEqual(__lowerCAmelCase ,[4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
_lowerCamelCase : Any = tokenizer.convert_ids_to_tokens(__lowerCAmelCase )
self.assertListEqual(
__lowerCAmelCase ,[SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "<unk>", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] )
@slow
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = [
"Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
"general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
"Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained "
"models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.",
"BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
"conditioning on both left and right context in all layers.",
"The quick brown fox jumps over the lazy dog.",
]
# fmt: off
_lowerCamelCase : Tuple = {
"input_ids": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCAmelCase ,model_name="microsoft/speecht5_asr" ,revision="c5ef64c71905caeccde0e4462ef3f9077224c524" ,sequences=__lowerCAmelCase ,)
| 46 | 0 |
'''simple docstring'''
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"
def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError("Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data
@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
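# Added illustration (hand-checked): _dense_to_one_hot(numpy.array([2, 0]), 3)
# returns [[0., 0., 1.], [1., 0., 0.]] -- index_offset [0, 3] plus the labels
# selects flat positions 2 and 3 to set to 1.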
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract the labels into a 1D uint8 numpy array [index]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError("Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
class _DataSet:
    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(
        self,
        images,
        labels,
        fake_data=False,
        one_hot=False,
        dtype=dtypes.float32,
        reshape=True,
        seed=None,
    ):
        """Construct a _DataSet.

        `one_hot` is used only if `fake_data` is true. `dtype` can be either
        uint8 to leave the input as [0, 255], or float32 to rescale into
        [0, 1]. `seed` provides convenient deterministic testing.
        """
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]

            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    """Download the data from `source_url`, unless it's already present."""
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print("Successfully downloaded", filename, size, "bytes.")
    return filepath
@deprecated(None, "Please use alternatives such as: tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    if fake_data:

        def fake():
            return _DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = f"Validation size should be between 0 and {len(train_images)}. Received: {validation_size}."
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}

    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)

    return _Datasets(train=train, validation=validation, test=test)
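# Added usage sketch (deprecated API, shown for illustration only; the data
# directory below is a placeholder):
#     mnist = read_data_sets("/tmp/mnist_data", one_hot=True)
#     images, labels = mnist.train.next_batch(100)   # images: (100, 784) floats in [0, 1]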
| 13 |
"""simple docstring"""
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
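# Added usage note (a sketch; the class names come from the imports above):
#     from diffusers.schedulers import DDPMScheduler
#     scheduler = DDPMScheduler(num_train_timesteps=1000)
#     scheduler.set_timesteps(50)   # choose 50 inference steps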
| 46 | 0 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 4,
        max_size=32 * 6,
        num_labels=4,
        mask_feature_size=32,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels
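    # Added shape note: with the defaults above, prepare_config_and_inputs yields
    # pixel_values (2, 3, 128, 192), pixel_mask (2, 128, 192),
    # mask_labels (2, 4, 128, 192) with values in {0.0, 1.0}, and class_labels (2, 4).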
    def get_config(self):
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict
def __lowercase ( self , _a , _a ) -> Optional[Any]:
_a : Dict = output.encoder_hidden_states
_a : Tuple = output.pixel_decoder_hidden_states
_a : Any = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(_a ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_a ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_a ) , config.decoder_config.decoder_layers )
def __lowercase ( self , _a , _a , _a , _a=False ) -> Tuple:
with torch.no_grad():
_a : Tuple = MaskFormerModel(config=_a )
model.to(_a )
model.eval()
_a : Optional[int] = model(pixel_values=_a , pixel_mask=_a )
_a : int = model(_a , output_hidden_states=_a )
# the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_a , _a )
def __lowercase ( self , _a , _a , _a , _a , _a ) -> Tuple:
_a : Tuple = MaskFormerForInstanceSegmentation(config=_a )
model.to(_a )
model.eval()
def comm_check_on_output(_a ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_a : Optional[Any] = model(pixel_values=_a , pixel_mask=_a )
_a : str = model(_a )
comm_check_on_output(_a )
_a : Tuple = model(
pixel_values=_a , pixel_mask=_a , mask_labels=_a , class_labels=_a )
comm_check_on_output(_a )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class UpperCAmelCase_ ( __lowercase , __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : int = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
UpperCAmelCase__ : Dict = (
{"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Dict = False
UpperCAmelCase__ : Any = False
UpperCAmelCase__ : Dict = False
UpperCAmelCase__ : List[str] = False
def __lowercase ( self ) -> str:
_a : str = MaskFormerModelTester(self )
_a : Any = ConfigTester(self , config_class=_a , has_text_modality=_a )
def __lowercase ( self ) -> str:
self.config_tester.run_common_tests()
def __lowercase ( self ) -> str:
_a , _a : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(_a , **_a , output_hidden_states=_a )
def __lowercase ( self ) -> str:
_a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*_a )
@unittest.skip(reason='''MaskFormer does not use inputs_embeds''' )
def __lowercase ( self ) -> Optional[int]:
pass
@unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' )
def __lowercase ( self ) -> Optional[Any]:
pass
@unittest.skip(reason='''MaskFormer is not a generative model''' )
def __lowercase ( self ) -> Union[str, Any]:
pass
@unittest.skip(reason='''MaskFormer does not use token embeddings''' )
def __lowercase ( self ) -> Any:
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def __lowercase ( self ) -> Dict:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __lowercase ( self ) -> Tuple:
pass
def __lowercase ( self ) -> str:
_a , _a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Optional[int] = model_class(_a )
_a : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : Tuple = [*signature.parameters.keys()]
_a : Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
@slow
def __lowercase ( self ) -> Optional[Any]:
for model_name in ["facebook/maskformer-swin-small-coco"]:
_a : Any = MaskFormerModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def __lowercase ( self ) -> List[str]:
_a : Any = (self.model_tester.min_size,) * 2
_a : Union[str, Any] = {
'''pixel_values''': torch.randn((2, 3, *size) , device=_a ),
'''mask_labels''': torch.randn((2, 1_0, *size) , device=_a ),
'''class_labels''': torch.zeros(2 , 1_0 , device=_a ).long(),
}
_a : Any = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(_a )
_a : List[Any] = model(**_a )
self.assertTrue(outputs.loss is not None )
def __lowercase ( self ) -> Any:
_a , _a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(_a , **_a , output_hidden_states=_a )
def __lowercase ( self ) -> List[str]:
_a , _a : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Dict = model_class(_a ).to(_a )
_a : str = model(**_a , output_attentions=_a )
self.assertTrue(outputs.attentions is not None )
def __lowercase ( self ) -> List[Any]:
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
_a : Tuple = self.all_model_classes[1]
_a , _a , _a , _a , _a : int = self.model_tester.prepare_config_and_inputs()
_a : Dict = model_class(_a )
model.to(_a )
model.train()
_a : Optional[int] = model(_a , mask_labels=_a , class_labels=_a ).loss
loss.backward()
def __lowercase ( self ) -> Dict:
# only MaskFormerForInstanceSegmentation has the loss
_a : List[str] = self.all_model_classes[1]
_a , _a , _a , _a , _a : Tuple = self.model_tester.prepare_config_and_inputs()
_a : Union[str, Any] = True
_a : Tuple = True
_a : Optional[Any] = model_class(_a )
model.to(_a )
model.train()
_a : Optional[int] = model(_a , mask_labels=_a , class_labels=_a )
_a : Any = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_a : Optional[Any] = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
_a : Union[str, Any] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_a : Optional[Any] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_a )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
a__ = 1E-4
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowercase ( self ) -> Optional[int]:
return (
MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' )
if is_vision_available()
else None
)
def __lowercase ( self ) -> Any:
_a : Dict = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(_a )
_a : Optional[int] = self.default_image_processor
_a : List[Any] = prepare_img()
_a : int = image_processor(_a , return_tensors='''pt''' ).to(_a )
_a : List[Any] = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(_a , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
_a : List[str] = model(**_a )
_a : Union[str, Any] = torch.tensor(
[[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]] ).to(_a )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , _a , atol=_a ) )
_a : Optional[Any] = torch.tensor(
[[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]] ).to(_a )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _a , atol=_a ) )
_a : Tuple = torch.tensor(
[[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]] ).to(_a )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _a , atol=_a ) )
def __lowercase ( self ) -> Optional[Any]:
_a : Tuple = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(_a )
.eval()
)
_a : Optional[int] = self.default_image_processor
_a : Optional[int] = prepare_img()
_a : Union[str, Any] = image_processor(_a , return_tensors='''pt''' ).to(_a )
_a : Tuple = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(_a , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
_a : Any = model(**_a )
# masks_queries_logits
_a : Optional[Any] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
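        # MaskFormer predicts mask logits at 1/4 of the padded input resolution, hence
        # the // 4 above: for the 800 x 1088 input checked earlier this is (200, 272).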
_a : Optional[int] = [
[-1.373_7124, -1.772_4937, -1.936_4233],
[-1.597_7281, -1.986_7939, -2.152_3695],
[-1.579_5398, -1.926_9832, -2.09_3942],
]
_a : Tuple = torch.tensor(_a ).to(_a )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _a , atol=_a ) )
# class_queries_logits
_a : List[Any] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_a : Dict = torch.tensor(
[
[1.6_5_1_2e0_0, -5.2_5_7_2e0_0, -3.3_5_1_9e0_0],
[3.6_1_6_9e-0_2, -5.9_0_2_5e0_0, -2.9_3_1_3e0_0],
[1.0_7_6_6e-0_4, -7.7_6_3_0e0_0, -5.1_2_6_3e0_0],
] ).to(_a )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _a , atol=_a ) )
def __lowercase ( self ) -> str:
_a : Any = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' )
.to(_a )
.eval()
)
_a : Dict = self.default_image_processor
_a : str = prepare_img()
_a : Union[str, Any] = image_processor(_a , return_tensors='''pt''' ).to(_a )
_a : Union[str, Any] = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(_a , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
_a : int = model(**_a )
# masks_queries_logits
_a : Optional[Any] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_a : Union[str, Any] = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
_a : str = torch.tensor(_a ).to(_a )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _a , atol=_a ) )
# class_queries_logits
_a : str = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_a : Union[str, Any] = torch.tensor(
[[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]] ).to(_a )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _a , atol=_a ) )
def __lowercase ( self ) -> Union[str, Any]:
_a : Dict = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(_a )
.eval()
)
_a : Optional[Any] = self.default_image_processor
_a : str = image_processor(
[np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors='''pt''' , )
_a : List[str] = inputs['''pixel_values'''].to(_a )
_a : Dict = [el.to(_a ) for el in inputs['''mask_labels''']]
_a : List[str] = [el.to(_a ) for el in inputs['''class_labels''']]
with torch.no_grad():
_a : Tuple = model(**_a )
self.assertTrue(outputs.loss is not None )
"""simple docstring"""
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class A_ ( _a ):
lowerCAmelCase__ = (DDIMParallelScheduler,)
lowerCAmelCase__ = (('eta', 0.0), ('num_inference_steps', 5_0))
def _lowercase ( self: List[str] ,**__lowerCAmelCase: Tuple ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = {
"num_train_timesteps": 1_000,
"beta_start": 0.00_01,
"beta_end": 0.02,
"beta_schedule": "linear",
"clip_sample": True,
}
config.update(**__lowerCAmelCase )
return config
def _lowercase ( self: int ,**__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : List[Any] = self.scheduler_classes[0]
_lowerCamelCase : str = self.get_scheduler_config(**__lowerCAmelCase )
_lowerCamelCase : Any = scheduler_class(**__lowerCAmelCase )
_lowerCamelCase, _lowerCamelCase : Optional[Any] = 10, 0.0
_lowerCamelCase : List[Any] = self.dummy_model()
_lowerCamelCase : Optional[Any] = self.dummy_sample_deter
scheduler.set_timesteps(__lowerCAmelCase )
for t in scheduler.timesteps:
_lowerCamelCase : Optional[Any] = model(__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : int = scheduler.step(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ).prev_sample
return sample
def _lowercase ( self: List[str] ):
'''simple docstring'''
for timesteps in [100, 500, 1_000]:
self.check_over_configs(num_train_timesteps=__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = self.scheduler_classes[0]
_lowerCamelCase : Dict = self.get_scheduler_config(steps_offset=1 )
_lowerCamelCase : Union[str, Any] = scheduler_class(**__lowerCAmelCase )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps ,torch.LongTensor([801, 601, 401, 201, 1] ) )
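        # Worked arithmetic for the assertion above: with num_train_timesteps=1000 from
        # get_scheduler_config and 5 inference steps, the stride is 1000 // 5 = 200, so
        # the raw "leading" grid is [800, 600, 400, 200, 0]; steps_offset=1 shifts each
        # entry by one, giving [801, 601, 401, 201, 1].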
def _lowercase ( self: Any ):
'''simple docstring'''
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] ,[0.0_02, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=__lowerCAmelCase ,beta_end=__lowerCAmelCase )
def _lowercase ( self: List[str] ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__lowerCAmelCase )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__lowerCAmelCase )
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=__lowerCAmelCase )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=__lowerCAmelCase )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
self.check_over_configs(thresholding=__lowerCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=__lowerCAmelCase ,prediction_type=__lowerCAmelCase ,sample_max_value=__lowerCAmelCase ,)
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
for t in [1, 10, 49]:
self.check_over_forward(time_step=__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 10, 50] ,[10, 50, 500] ):
self.check_over_forward(time_step=__lowerCAmelCase ,num_inference_steps=__lowerCAmelCase )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
for t, eta in zip([1, 10, 49] ,[0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=__lowerCAmelCase ,eta=__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = self.scheduler_classes[0]
_lowerCamelCase : str = self.get_scheduler_config()
_lowerCamelCase : List[str] = scheduler_class(**__lowerCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ,0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(420 ,400 ) - 0.1_47_71 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(980 ,960 ) - 0.3_24_60 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0 ,0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ,486 ) - 0.0_09_79 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ,998 ) - 0.02 ) ) < 1e-5
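        # The reference values follow the standard DDIM posterior variance (stated here
        # as background; the formula itself is not shown in this file):
        #   var(t, prev_t) = (1 - alphabar_prev) / (1 - alphabar_t) * (1 - alphabar_t / alphabar_prev)
        # It is 0 when t == prev_t, and for adjacent late timesteps it approaches
        # beta_t, matching the ~0.02 checked at (999, 998) with beta_end=0.02.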
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = self.scheduler_classes[0]
_lowerCamelCase : Union[str, Any] = self.get_scheduler_config()
_lowerCamelCase : str = scheduler_class(**__lowerCAmelCase )
_lowerCamelCase, _lowerCamelCase : Optional[int] = 10, 0.0
scheduler.set_timesteps(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = self.dummy_model()
_lowerCamelCase : Optional[int] = self.dummy_sample_deter
_lowerCamelCase : List[str] = self.dummy_sample_deter + 0.1
_lowerCamelCase : Dict = self.dummy_sample_deter - 0.1
_lowerCamelCase : Union[str, Any] = samplea.shape[0]
_lowerCamelCase : List[Any] = torch.stack([samplea, samplea, samplea] ,dim=0 )
_lowerCamelCase : Dict = torch.arange(__lowerCAmelCase )[0:3, None].repeat(1 ,__lowerCAmelCase )
_lowerCamelCase : str = model(samples.flatten(0 ,1 ) ,timesteps.flatten(0 ,1 ) )
_lowerCamelCase : List[str] = scheduler.batch_step_no_noise(__lowerCAmelCase ,timesteps.flatten(0 ,1 ) ,samples.flatten(0 ,1 ) ,__lowerCAmelCase )
_lowerCamelCase : str = torch.sum(torch.abs(__lowerCAmelCase ) )
_lowerCamelCase : List[Any] = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 11_47.79_04 ) < 1e-2
assert abs(result_mean.item() - 0.49_82 ) < 1e-3
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : Any = self.full_loop()
_lowerCamelCase : Optional[Any] = torch.sum(torch.abs(__lowerCAmelCase ) )
_lowerCamelCase : int = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 1_72.00_67 ) < 1e-2
assert abs(result_mean.item() - 0.22_39_67 ) < 1e-3
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : str = self.full_loop(prediction_type="v_prediction" )
_lowerCamelCase : Optional[int] = torch.sum(torch.abs(__lowerCAmelCase ) )
_lowerCamelCase : List[str] = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 52.53_02 ) < 1e-2
assert abs(result_mean.item() - 0.06_84 ) < 1e-3
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : str = self.full_loop(set_alpha_to_one=__lowerCAmelCase ,beta_start=0.01 )
_lowerCamelCase : List[str] = torch.sum(torch.abs(__lowerCAmelCase ) )
_lowerCamelCase : Dict = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 1_49.82_95 ) < 1e-2
assert abs(result_mean.item() - 0.19_51 ) < 1e-3
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : List[str] = self.full_loop(set_alpha_to_one=__lowerCAmelCase ,beta_start=0.01 )
_lowerCamelCase : int = torch.sum(torch.abs(__lowerCAmelCase ) )
_lowerCamelCase : Union[str, Any] = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 1_49.07_84 ) < 1e-2
assert abs(result_mean.item() - 0.19_41 ) < 1e-3
import datasets
from .evaluate import evaluate
A : Union[str, Any] = '\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100,000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n'
A : int = '\nThis metric wraps the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n'
A : Any = '\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the SQuAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> squad_metric = datasets.load_metric("squad")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
'''simple docstring'''
def lowerCamelCase__ (self : Any ) -> int:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {"""id""": datasets.Value("""string""" ), """prediction_text""": datasets.Value("""string""" )},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , reference_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , )
def lowerCamelCase__ (self : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple ) -> Tuple:
"""simple docstring"""
lowercase__ = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions}
lowercase__ = [
{
"""paragraphs""": [
{
"""qas""": [
{
"""answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
"""id""": ref["""id"""],
}
for ref in references
]
}
]
}
]
lowercase__ = evaluate(dataset=_UpperCAmelCase , predictions=_UpperCAmelCase )
return score
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
_lowerCAmelCase : int = {
'''google/bit-50''': '''https://huggingface.co/google/bit-50/resolve/main/config.json''',
}
class A_ ( _a , _a ):
lowerCAmelCase__ = 'bit'
lowerCAmelCase__ = ['preactivation', 'bottleneck']
lowerCAmelCase__ = ['SAME', 'VALID']
def __init__( self: Tuple ,__lowerCAmelCase: List[Any]=3 ,__lowerCAmelCase: List[str]=64 ,__lowerCAmelCase: Union[str, Any]=[256, 512, 1_024, 2_048] ,__lowerCAmelCase: Optional[int]=[3, 4, 6, 3] ,__lowerCAmelCase: str="preactivation" ,__lowerCAmelCase: Tuple="relu" ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: Dict=32 ,__lowerCAmelCase: List[str]=0.0 ,__lowerCAmelCase: Optional[Any]=False ,__lowerCAmelCase: Dict=32 ,__lowerCAmelCase: Dict=1 ,__lowerCAmelCase: List[Any]=None ,__lowerCAmelCase: str=None ,**__lowerCAmelCase: Any ,):
'''simple docstring'''
super().__init__(**__lowerCAmelCase )
if layer_type not in self.layer_types:
raise ValueError(F"""layer_type={layer_type} is not one of {','.join(self.layer_types )}""" )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
_lowerCamelCase : List[Any] = global_padding.upper()
else:
raise ValueError(F"""Padding strategy {global_padding} not supported""" )
_lowerCamelCase : str = num_channels
_lowerCamelCase : str = embedding_size
_lowerCamelCase : Dict = hidden_sizes
_lowerCamelCase : str = depths
_lowerCamelCase : Any = layer_type
_lowerCamelCase : Any = hidden_act
_lowerCamelCase : List[str] = global_padding
_lowerCamelCase : Tuple = num_groups
_lowerCamelCase : Optional[int] = drop_path_rate
_lowerCamelCase : List[Any] = embedding_dynamic_padding
_lowerCamelCase : Any = output_stride
_lowerCamelCase : List[str] = width_factor
_lowerCamelCase : List[Any] = ["stem"] + [F"""stage{idx}""" for idx in range(1 ,len(__lowerCAmelCase ) + 1 )]
_lowerCamelCase, _lowerCamelCase : Union[str, Any] = get_aligned_output_features_output_indices(
out_features=__lowerCAmelCase ,out_indices=__lowerCAmelCase ,stage_names=self.stage_names )
import qiskit
def __a ( A__ : int = 2 ):
SCREAMING_SNAKE_CASE = qubits
# Using Aer's simulator
SCREAMING_SNAKE_CASE = qiskit.Aer.get_backend("aer_simulator" )
# Creating a Quantum Circuit acting on the q register
SCREAMING_SNAKE_CASE = qiskit.QuantumCircuit(A__ , A__ )
# Adding a H gate on qubit 0 (now q0 in superposition)
circuit.h(0 )
for i in range(1 , A__ ):
# Adding CX (CNOT) gate
circuit.cx(i - 1 , A__ )
# Mapping the quantum measurement to the classical bits
circuit.measure(list(range(A__ ) ) , list(range(A__ ) ) )
    # Measuring any one qubit now collapses the superposition of the others,
    # leaving every qubit in the same state as the measured one.
# Executing the circuit on the simulator
SCREAMING_SNAKE_CASE = qiskit.execute(A__ , A__ , shots=1000 )
return job.result().get_counts(A__ )
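# The circuit above prepares a GHZ-style entangled state, so only the all-zeros and
# all-ones bitstrings are ever observed; e.g. quantum_entanglement(3) returns counts
# close to {'000': 500, '111': 500} over 1000 shots (illustrative split; the exact
# numbers vary run to run).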
if __name__ == "__main__":
print(f'Total count for various states are: {quantum_entanglement(3)}')
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : List[str] = {
'''google/vivit-b-16x2-kinetics400''': (
'''https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'''
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class A_ ( _a ):
lowerCAmelCase__ = 'vivit'
def __init__( self: List[Any] ,__lowerCAmelCase: int=224 ,__lowerCAmelCase: Any=32 ,__lowerCAmelCase: str=[2, 16, 16] ,__lowerCAmelCase: Optional[Any]=3 ,__lowerCAmelCase: List[str]=768 ,__lowerCAmelCase: List[Any]=12 ,__lowerCAmelCase: Optional[int]=12 ,__lowerCAmelCase: Optional[Any]=3_072 ,__lowerCAmelCase: Any="gelu_fast" ,__lowerCAmelCase: Tuple=0.0 ,__lowerCAmelCase: Any=0.0 ,__lowerCAmelCase: Union[str, Any]=0.02 ,__lowerCAmelCase: List[str]=1e-06 ,__lowerCAmelCase: Optional[Any]=True ,**__lowerCAmelCase: Optional[int] ,):
'''simple docstring'''
_lowerCamelCase : Any = hidden_size
_lowerCamelCase : List[str] = num_hidden_layers
_lowerCamelCase : Union[str, Any] = num_attention_heads
_lowerCamelCase : Any = intermediate_size
_lowerCamelCase : Tuple = hidden_act
_lowerCamelCase : Tuple = hidden_dropout_prob
_lowerCamelCase : Optional[Any] = attention_probs_dropout_prob
_lowerCamelCase : List[Any] = initializer_range
_lowerCamelCase : int = layer_norm_eps
_lowerCamelCase : Tuple = image_size
_lowerCamelCase : Dict = num_frames
_lowerCamelCase : Optional[int] = tubelet_size
_lowerCamelCase : int = num_channels
_lowerCamelCase : List[str] = qkv_bias
super().__init__(**__lowerCAmelCase )
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCAmelCase_ : Optional[Any] = {
'''configuration_altclip''': [
'''ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AltCLIPConfig''',
'''AltCLIPTextConfig''',
'''AltCLIPVisionConfig''',
],
'''processing_altclip''': ['''AltCLIPProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[str] = [
'''ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AltCLIPPreTrainedModel''',
'''AltCLIPModel''',
'''AltCLIPTextModel''',
'''AltCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
UpperCAmelCase_ : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
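# Note: with the _LazyModule registration above, `from <this package> import
# AltCLIPModel` resolves lazily; the torch-dependent submodule is only imported on
# first attribute access (static type checkers instead see the eager imports under
# TYPE_CHECKING).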
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ ( _a , unittest.TestCase ):
lowerCAmelCase__ = MgpstrTokenizer
lowerCAmelCase__ = False
lowerCAmelCase__ = {}
lowerCAmelCase__ = False
def _lowercase ( self: int ):
'''simple docstring'''
super().setUp()
# fmt: off
_lowerCamelCase : List[Any] = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
# fmt: on
_lowerCamelCase : Optional[Any] = dict(zip(__lowerCAmelCase ,range(len(__lowerCAmelCase ) ) ) )
_lowerCamelCase : List[str] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file ,"w" ,encoding="utf-8" ) as fp:
fp.write(json.dumps(__lowerCAmelCase ) + "\n" )
def _lowercase ( self: List[str] ,**__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname ,**__lowerCAmelCase )
def _lowercase ( self: List[Any] ,__lowerCAmelCase: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : List[Any] = "tester"
_lowerCamelCase : Optional[Any] = "tester"
return input_text, output_text
@unittest.skip("MGP-STR always lower cases letters." )
def _lowercase ( self: Any ):
'''simple docstring'''
pass
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase : List[str] = self.get_tokenizers(do_lower_case=__lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_lowerCamelCase : Tuple = "[SPECIAL_TOKEN]"
tokenizer.add_special_tokens({"cls_token": special_token} )
_lowerCamelCase : Optional[Any] = tokenizer.encode([special_token] ,add_special_tokens=__lowerCAmelCase )
self.assertEqual(len(__lowerCAmelCase ) ,1 )
_lowerCamelCase : int = tokenizer.decode(__lowerCAmelCase ,skip_special_tokens=__lowerCAmelCase )
self.assertTrue(special_token not in decoded )
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase : List[str] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_lowerCamelCase, _lowerCamelCase : List[Any] = self.get_input_output_texts(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = tokenizer.tokenize(__lowerCAmelCase )
_lowerCamelCase : int = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
_lowerCamelCase : List[Any] = tokenizer.encode(__lowerCAmelCase ,add_special_tokens=__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : Dict = tokenizer.convert_ids_to_tokens(__lowerCAmelCase )
self.assertNotEqual(len(__lowerCAmelCase ) ,0 )
_lowerCamelCase : Optional[int] = tokenizer.decode(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase ,__lowerCAmelCase )
self.assertEqual(text_a.replace(" " ,"" ) ,__lowerCAmelCase )
@unittest.skip("MGP-STR tokenizer only handles one sequence." )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" )
def _lowercase ( self: str ):
'''simple docstring'''
pass
'''simple docstring'''
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__) # pylint: disable=invalid-name
_SCREAMING_SNAKE_CASE = "\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)[\"depth\"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline(\"depth-estimation\")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to(\"cuda\")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to(\"cuda\")\n\n\n >>> img = load_image(\n ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n ... \"/kandinsky/cat.png\"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")\n\n >>> prompt = \"A robot, 4k photo\"\n >>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"\n\n >>> generator = torch.Generator(device=\"cuda\").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save(\"robot_cat.png\")\n ```\n"
def __a(SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Tuple=8 ):
'''simple docstring'''
_lowerCAmelCase = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
_lowerCAmelCase = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
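# Worked example (illustrative): with the default movq scale factor of 8,
# downscale_height_and_width(768, 768, 8) computes 768 // 64 = 12 with no remainder
# and returns (12 * 8, 12 * 8) = (96, 96), the latent resolution. A non-divisible
# input such as 770 rounds up: 770 // 64 = 12 remainder 2, so 13 * 8 = 104.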
class lowerCAmelCase_ ( __magic_name__ ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ) -> Optional[int]:
super().__init__()
self.register_modules(
unet=_lowerCAmelCase , scheduler=_lowerCAmelCase , movq=_lowerCAmelCase , )
_lowerCAmelCase = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
if latents is None:
_lowerCAmelCase = randn_tensor(_lowerCAmelCase , generator=_lowerCAmelCase , device=_lowerCAmelCase , dtype=_lowerCAmelCase )
else:
if latents.shape != shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
_lowerCAmelCase = latents.to(_lowerCAmelCase )
_lowerCAmelCase = latents * scheduler.init_noise_sigma
return latents
def _snake_case ( self , _lowerCAmelCase=0 ) -> Optional[Any]:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
_lowerCAmelCase = torch.device(f'''cuda:{gpu_id}''' )
_lowerCAmelCase = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_lowerCAmelCase , _lowerCAmelCase )
def _snake_case ( self , _lowerCAmelCase=0 ) -> Tuple:
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
_lowerCAmelCase = torch.device(f'''cuda:{gpu_id}''' )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=_lowerCAmelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_lowerCAmelCase = None
for cpu_offloaded_model in [self.unet, self.movq]:
_lowerCAmelCase , _lowerCAmelCase = cpu_offload_with_hook(_lowerCAmelCase , _lowerCAmelCase , prev_module_hook=_lowerCAmelCase )
# We'll offload the last model manually.
_lowerCAmelCase = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _snake_case ( self ) -> int:
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(_lowerCAmelCase , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_lowerCAmelCase )
def __call__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 512 , _lowerCAmelCase = 512 , _lowerCAmelCase = 100 , _lowerCAmelCase = 4.0 , _lowerCAmelCase = 1 , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = "pil" , _lowerCAmelCase = True , ) -> List[str]:
_lowerCAmelCase = self._execution_device
_lowerCAmelCase = guidance_scale > 1.0
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = torch.cat(_lowerCAmelCase , dim=0 )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = torch.cat(_lowerCAmelCase , dim=0 )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = torch.cat(_lowerCAmelCase , dim=0 )
_lowerCAmelCase = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
_lowerCAmelCase = image_embeds.repeat_interleave(_lowerCAmelCase , dim=0 )
_lowerCAmelCase = negative_image_embeds.repeat_interleave(_lowerCAmelCase , dim=0 )
_lowerCAmelCase = hint.repeat_interleave(_lowerCAmelCase , dim=0 )
_lowerCAmelCase = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_lowerCAmelCase )
_lowerCAmelCase = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=_lowerCAmelCase )
self.scheduler.set_timesteps(_lowerCAmelCase , device=_lowerCAmelCase )
_lowerCAmelCase = self.scheduler.timesteps
_lowerCAmelCase = self.movq.config.latent_channels
_lowerCAmelCase , _lowerCAmelCase = downscale_height_and_width(_lowerCAmelCase , _lowerCAmelCase , self.movq_scale_factor )
# create initial latent
_lowerCAmelCase = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , self.scheduler , )
for i, t in enumerate(self.progress_bar(_lowerCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
_lowerCAmelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCAmelCase = {"image_embeds": image_embeds, "hint": hint}
_lowerCAmelCase = self.unet(
sample=_lowerCAmelCase , timestep=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , added_cond_kwargs=_lowerCAmelCase , return_dict=_lowerCAmelCase , )[0]
if do_classifier_free_guidance:
_lowerCAmelCase , _lowerCAmelCase = noise_pred.split(latents.shape[1] , dim=1 )
_lowerCAmelCase , _lowerCAmelCase = noise_pred.chunk(2 )
_lowerCAmelCase , _lowerCAmelCase = variance_pred.chunk(2 )
_lowerCAmelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_lowerCAmelCase = torch.cat([noise_pred, variance_pred_text] , dim=1 )
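            # The UNet predicts noise and a learned variance concatenated along the
            # channel dim; guidance is applied to the noise halves only, and the
            # variance from the text-conditioned branch is re-attached afterwards.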
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_lowerCAmelCase , _lowerCAmelCase = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_lowerCAmelCase = self.scheduler.step(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase , )[0]
# post-processing
_lowerCAmelCase = self.movq.decode(_lowerCAmelCase , force_not_quantize=_lowerCAmelCase )["sample"]
if output_type not in ["pt", "np", "pil"]:
        raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported, not output_type={output_type}''' )
if output_type in ["np", "pil"]:
_lowerCAmelCase = image * 0.5 + 0.5
_lowerCAmelCase = image.clamp(0 , 1 )
_lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
_lowerCAmelCase = self.numpy_to_pil(_lowerCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_lowerCAmelCase )
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
_lowerCAmelCase : str = '''
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior.to("cuda")
>>> prompt = "A red cartoon frog, 4k"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
... )
>>> pipe.to("cuda")
>>> init_image = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/frog.png"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save("red_frog.png")
```
'''
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=8 ) -> Tuple:
'''simple docstring'''
_lowerCamelCase : int = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
_lowerCamelCase : Optional[Any] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=512 , _lowerCamelCase=512 ) -> int:
'''simple docstring'''
_lowerCamelCase : int = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
_lowerCamelCase : Union[str, Any] = np.array(pil_image.convert("RGB" ) )
_lowerCamelCase : Any = arr.astype(np.floataa ) / 1_2_7.5 - 1
_lowerCamelCase : Optional[Any] = np.transpose(_lowerCamelCase , [2, 0, 1] )
_lowerCamelCase : Any = torch.from_numpy(_lowerCamelCase ).unsqueeze(0 )
return image
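# prepare_image maps 8-bit RGB into the [-1, 1] range the VQ encoder expects:
# pixel 0 -> 0 / 127.5 - 1 = -1.0, pixel 127.5 -> 0.0, pixel 255 -> 1.0.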
class A_ ( _a ):
def __init__( self: Any ,__lowerCAmelCase: UNetaDConditionModel ,__lowerCAmelCase: DDPMScheduler ,__lowerCAmelCase: VQModel ,):
'''simple docstring'''
super().__init__()
self.register_modules(
unet=__lowerCAmelCase ,scheduler=__lowerCAmelCase ,movq=__lowerCAmelCase ,)
_lowerCamelCase : List[str] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def _lowercase ( self: Dict ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: Tuple ):
'''simple docstring'''
_lowerCamelCase : int = min(int(num_inference_steps * strength ) ,__lowerCAmelCase )
_lowerCamelCase : Tuple = max(num_inference_steps - init_timestep ,0 )
_lowerCamelCase : Optional[int] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
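        # Example of the strength semantics above: num_inference_steps=100 and
        # strength=0.2 give init_timestep = 20 and t_start = 80, so only the final 20
        # scheduler timesteps run and the input image is only lightly noised/denoised.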
def _lowercase ( self: Optional[int] ,__lowerCAmelCase: Any ,__lowerCAmelCase: Any ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: List[str]=None ):
'''simple docstring'''
if not isinstance(__lowerCAmelCase ,(torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(__lowerCAmelCase )}""" )
_lowerCamelCase : Any = image.to(device=__lowerCAmelCase ,dtype=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = batch_size * num_images_per_prompt
if image.shape[1] == 4:
_lowerCamelCase : List[Any] = image
else:
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ) and len(__lowerCAmelCase ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(__lowerCAmelCase )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
elif isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : List[Any] = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(__lowerCAmelCase )
]
_lowerCamelCase : Tuple = torch.cat(__lowerCAmelCase ,dim=0 )
else:
_lowerCamelCase : int = self.movq.encode(__lowerCAmelCase ).latent_dist.sample(__lowerCAmelCase )
_lowerCamelCase : int = self.movq.config.scaling_factor * init_latents
_lowerCamelCase : Tuple = torch.cat([init_latents] ,dim=0 )
_lowerCamelCase : Optional[int] = init_latents.shape
_lowerCamelCase : int = randn_tensor(__lowerCAmelCase ,generator=__lowerCAmelCase ,device=__lowerCAmelCase ,dtype=__lowerCAmelCase )
# get latents
_lowerCamelCase : Union[str, Any] = self.scheduler.add_noise(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : str = init_latents
return latents
def _lowercase ( self: List[Any] ,__lowerCAmelCase: Optional[int]=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
_lowerCamelCase : str = torch.device(F"""cuda:{gpu_id}""" )
_lowerCamelCase : Dict = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: List[Any] ,__lowerCAmelCase: int=0 ):
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version(">=" ,"0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
_lowerCamelCase : List[str] = torch.device(F"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to("cpu" ,silence_dtype_warnings=__lowerCAmelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_lowerCamelCase : str = None
for cpu_offloaded_model in [self.unet, self.movq]:
_lowerCamelCase, _lowerCamelCase : str = cpu_offload_with_hook(__lowerCAmelCase ,__lowerCAmelCase ,prev_module_hook=__lowerCAmelCase )
# We'll offload the last model manually.
_lowerCamelCase : int = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
if not hasattr(self.unet ,"_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(__lowerCAmelCase ,"_hf_hook" )
and hasattr(module._hf_hook ,"execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(__lowerCAmelCase )
def __call__( self: Dict ,__lowerCAmelCase: Union[torch.FloatTensor, List[torch.FloatTensor]] ,__lowerCAmelCase: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] ,__lowerCAmelCase: Union[torch.FloatTensor, List[torch.FloatTensor]] ,__lowerCAmelCase: int = 512 ,__lowerCAmelCase: int = 512 ,__lowerCAmelCase: int = 100 ,__lowerCAmelCase: float = 4.0 ,__lowerCAmelCase: float = 0.3 ,__lowerCAmelCase: int = 1 ,__lowerCAmelCase: Optional[Union[torch.Generator, List[torch.Generator]]] = None ,__lowerCAmelCase: Optional[str] = "pil" ,__lowerCAmelCase: bool = True ,):
'''simple docstring'''
_lowerCamelCase : Optional[int] = self._execution_device
_lowerCamelCase : Dict = guidance_scale > 1.0
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : int = torch.cat(__lowerCAmelCase ,dim=0 )
_lowerCamelCase : Any = image_embeds.shape[0]
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : str = torch.cat(__lowerCAmelCase ,dim=0 )
if do_classifier_free_guidance:
_lowerCamelCase : List[str] = image_embeds.repeat_interleave(__lowerCAmelCase ,dim=0 )
_lowerCamelCase : Optional[int] = negative_image_embeds.repeat_interleave(__lowerCAmelCase ,dim=0 )
_lowerCamelCase : Optional[Any] = torch.cat([negative_image_embeds, image_embeds] ,dim=0 ).to(dtype=self.unet.dtype ,device=__lowerCAmelCase )
if not isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : Tuple = [image]
if not all(isinstance(__lowerCAmelCase ,(PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
F"""Input is in incorrect format: {[type(__lowerCAmelCase ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
_lowerCamelCase : Union[str, Any] = torch.cat([prepare_image(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ) for i in image] ,dim=0 )
_lowerCamelCase : str = image.to(dtype=image_embeds.dtype ,device=__lowerCAmelCase )
_lowerCamelCase : Tuple = self.movq.encode(__lowerCAmelCase )["latents"]
_lowerCamelCase : List[str] = latents.repeat_interleave(__lowerCAmelCase ,dim=0 )
self.scheduler.set_timesteps(__lowerCAmelCase ,device=__lowerCAmelCase )
_lowerCamelCase, _lowerCamelCase : Optional[Any] = self.get_timesteps(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : Any = timesteps[:1].repeat(batch_size * num_images_per_prompt )
_lowerCamelCase, _lowerCamelCase : Tuple = downscale_height_and_width(__lowerCAmelCase ,__lowerCAmelCase ,self.movq_scale_factor )
_lowerCamelCase : List[Any] = self.prepare_latents(
__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,image_embeds.dtype ,__lowerCAmelCase ,__lowerCAmelCase )
for i, t in enumerate(self.progress_bar(__lowerCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
_lowerCamelCase : Union[str, Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCamelCase : List[str] = {"image_embeds": image_embeds}
_lowerCamelCase : Tuple = self.unet(
sample=__lowerCAmelCase ,timestep=__lowerCAmelCase ,encoder_hidden_states=__lowerCAmelCase ,added_cond_kwargs=__lowerCAmelCase ,return_dict=__lowerCAmelCase ,)[0]
if do_classifier_free_guidance:
_lowerCamelCase, _lowerCamelCase : Tuple = noise_pred.split(latents.shape[1] ,dim=1 )
_lowerCamelCase, _lowerCamelCase : Dict = noise_pred.chunk(2 )
_lowerCamelCase, _lowerCamelCase : str = variance_pred.chunk(2 )
_lowerCamelCase : Tuple = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_lowerCamelCase : Any = torch.cat([noise_pred, variance_pred_text] ,dim=1 )
if not (
hasattr(self.scheduler.config ,"variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_lowerCamelCase, _lowerCamelCase : Union[str, Any] = noise_pred.split(latents.shape[1] ,dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_lowerCamelCase : Optional[int] = self.scheduler.step(
__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,generator=__lowerCAmelCase ,)[0]
# post-processing
_lowerCamelCase : Optional[int] = self.movq.decode(__lowerCAmelCase ,force_not_quantize=__lowerCAmelCase )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
_lowerCamelCase : Optional[int] = image * 0.5 + 0.5
_lowerCamelCase : str = image.clamp(0 ,1 )
_lowerCamelCase : Optional[int] = image.cpu().permute(0 ,2 ,3 ,1 ).float().numpy()
if output_type == "pil":
_lowerCamelCase : str = self.numpy_to_pil(__lowerCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__lowerCAmelCase )
"""simple docstring"""
import argparse
import os
import re
import packaging.version
_a = """examples/"""
_a = {
"""examples""": (re.compile(R"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""),
"""init""": (re.compile(R"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""),
"""setup""": (re.compile(R"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), R"""\1version=\"VERSION\","""),
"""doc""": (re.compile(R"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""),
}
_a = {
"""init""": """src/transformers/__init__.py""",
"""setup""": """setup.py""",
}
_a = """README.md"""
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case ) -> Optional[Any]:
"""simple docstring"""
with open(__snake_case, '''r''', encoding='''utf-8''', newline='''\n''' ) as f:
_UpperCamelCase = f.read()
_UpperCamelCase , _UpperCamelCase = REPLACE_PATTERNS[pattern]
_UpperCamelCase = replace.replace('''VERSION''', __snake_case )
_UpperCamelCase = re_pattern.sub(__snake_case, __snake_case )
with open(__snake_case, '''w''', encoding='''utf-8''', newline='''\n''' ) as f:
f.write(__snake_case )
def lowerCamelCase__ ( __snake_case ) -> List[str]:
"""simple docstring"""
for folder, directories, fnames in os.walk(__snake_case ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('''research_projects''' )
if "legacy" in directories:
directories.remove('''legacy''' )
for fname in fnames:
if fname.endswith('''.py''' ):
update_version_in_file(os.path.join(__snake_case, __snake_case ), __snake_case, pattern='''examples''' )
def lowerCamelCase__ ( __snake_case, __snake_case=False ) -> str:
"""simple docstring"""
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(__snake_case, __snake_case, __snake_case )
if not patch:
update_version_in_examples(__snake_case )
def lowerCamelCase__ ( ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = '''🤗 Transformers currently provides the following architectures'''
_UpperCamelCase = '''1. Want to contribute a new model?'''
with open(__snake_case, '''r''', encoding='''utf-8''', newline='''\n''' ) as f:
_UpperCamelCase = f.readlines()
# Find the start of the list.
_UpperCamelCase = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
_UpperCamelCase = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('''1.''' ):
_UpperCamelCase = lines[index].replace(
'''https://huggingface.co/docs/transformers/main/model_doc''', '''https://huggingface.co/docs/transformers/model_doc''', )
index += 1
with open(__snake_case, '''w''', encoding='''utf-8''', newline='''\n''' ) as f:
f.writelines(__snake_case )
def lowerCamelCase__ ( ) -> Tuple:
"""simple docstring"""
with open(REPLACE_FILES['''init'''], '''r''' ) as f:
_UpperCamelCase = f.read()
_UpperCamelCase = REPLACE_PATTERNS['''init'''][0].search(__snake_case ).groups()[0]
return packaging.version.parse(__snake_case )
def lowerCamelCase__ ( __snake_case=False ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = get_version()
if patch and default_version.is_devrelease:
        raise ValueError('''Can\'t create a patch version from the dev branch; check out a released version!''' )
if default_version.is_devrelease:
_UpperCamelCase = default_version.base_version
elif patch:
_UpperCamelCase = F'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
else:
_UpperCamelCase = F'''{default_version.major}.{default_version.minor + 1}.0'''
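    # Illustrative walk-through: from 4.28.0.dev0 a normal release proposes 4.28.0,
    # a patch on 4.28.0 proposes 4.28.1, and post_release_work later proposes
    # 4.29.0.dev0 as the next dev version.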
# Now let's ask nicely if that's the right one.
_UpperCamelCase = input(F'''Which version are you releasing? [{default_version}]''' )
if len(__snake_case ) == 0:
_UpperCamelCase = default_version
print(F'''Updating version to {version}.''' )
global_version_update(__snake_case, patch=__snake_case )
if not patch:
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
def lowerCamelCase__ ( ) -> str:
"""simple docstring"""
_UpperCamelCase = get_version()
_UpperCamelCase = F'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
_UpperCamelCase = current_version.base_version
# Check with the user we got that right.
_UpperCamelCase = input(F'''Which version are we developing now? [{dev_version}]''' )
if len(__snake_case ) == 0:
_UpperCamelCase = dev_version
print(F'''Updating version to {version}.''' )
global_version_update(__snake_case )
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
if __name__ == "__main__":
_a = argparse.ArgumentParser()
parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""")
parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""")
_a = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("""Nothing to do after a patch :-)""")
else:
post_release_work()
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def lowerCamelCase_( ) -> None:
'''simple docstring'''
print("Making key files..." )
make_key_files("rsa" , 1024 )
print("Key files generation successful." )
def lowerCamelCase_( _lowerCamelCase ) -> tuple[tuple[int, int], tuple[int, int]]:
'''simple docstring'''
print("Generating prime p..." )
_lowerCamelCase : List[str] = rabinMiller.generate_large_prime(_lowerCamelCase )
print("Generating prime q..." )
_lowerCamelCase : Tuple = rabinMiller.generate_large_prime(_lowerCamelCase )
_lowerCamelCase : Dict = p * q
print("Generating e that is relatively prime to (p - 1) * (q - 1)..." )
while True:
_lowerCamelCase : Tuple = random.randrange(2 ** (key_size - 1) , 2 ** (key_size) )
if cryptoMath.gcd(_lowerCamelCase , (p - 1) * (q - 1) ) == 1:
break
print("Calculating d that is mod inverse of e..." )
_lowerCamelCase : str = cryptoMath.find_mod_inverse(_lowerCamelCase , (p - 1) * (q - 1) )
_lowerCamelCase : Dict = (n, e)
_lowerCamelCase : Dict = (n, d)
return (public_key, private_key)
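# Hand-worked sanity check of the arithmetic above, with textbook-sized numbers
# (illustrative only; main() generates 1024-bit keys):
#   p = 61, q = 53            -> n = 3233, (p - 1) * (q - 1) = 3120
#   e = 17                    -> gcd(17, 3120) == 1
#   d = 2753                  -> (17 * 2753) % 3120 == 1, so d is e's mod inverse
# Encrypting m = 65 gives pow(65, 17, 3233) == 2790, and decryption recovers
# pow(2790, 2753, 3233) == 65.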
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> None:
'''simple docstring'''
if os.path.exists(F"""{name}_pubkey.txt""" ) or os.path.exists(F"""{name}_privkey.txt""" ):
print("\nWARNING:" )
print(
F"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
"Use a different name or delete these files and re-run this program." )
sys.exit()
_lowerCamelCase, _lowerCamelCase : Dict = generate_key(_lowerCamelCase )
print(F"""\nWriting public key to file {name}_pubkey.txt...""" )
with open(F"""{name}_pubkey.txt""" , "w" ) as out_file:
out_file.write(F"""{key_size},{public_key[0]},{public_key[1]}""" )
print(F"""Writing private key to file {name}_privkey.txt...""" )
with open(F"""{name}_privkey.txt""" , "w" ) as out_file:
out_file.write(F"""{key_size},{private_key[0]},{private_key[1]}""" )
if __name__ == "__main__":
main()
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def _lowercase( __a : ndarray ):
return np.dot(__a , __a )
class lowercase_ :
def __init__( self , *,
lowercase_ = np.inf , lowercase_ = "linear" , lowercase_ = 0.0 , ) -> None:
a__ =regularization
a__ =gamma
if kernel == "linear":
a__ =self.__linear
elif kernel == "rbf":
if self.gamma == 0:
raise ValueError('rbf kernel requires gamma')
if not isinstance(self.gamma , (float, int)):
raise ValueError('gamma must be float or int')
if not self.gamma > 0:
raise ValueError('gamma must be > 0')
a__ =self.__rbf
# in the future, there could be a default value like in sklearn
            # sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
# previously it was 1/(n_features)
else:
a__ =F"""Unknown kernel: {kernel}"""
raise ValueError(lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_) -> float:
return np.dot(lowercase_ , lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_) -> float:
return np.exp(-(self.gamma * norm_squared(vectora - vectora)))
def __UpperCamelCase ( self , lowercase_ , lowercase_) -> None:
a__ =observations
a__ =classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
((a__) , ) =np.shape(lowercase_)
def to_minimize(lowercase_) -> float:
a__ =0
((a__) , ) =np.shape(lowercase_)
for i in range(lowercase_):
for j in range(lowercase_):
s += (
candidate[i]
* candidate[j]
* classes[i]
* classes[j]
* self.kernel(observations[i] , observations[j])
)
return 1 / 2 * s - sum(lowercase_)
a__ =LinearConstraint(lowercase_ , 0 , 0)
a__ =Bounds(0 , self.regularization)
a__ =minimize(
    lowercase_ , np.ones(lowercase_) , bounds=lowercase_ , constraints=[ly_constraint]).x
a__ =l_star
# calculating mean offset of separation plane to points
a__ =0
for i in range(lowercase_):
for j in range(lowercase_):
s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
observations[i] , observations[j])
a__ =s / n
def __UpperCamelCase ( self , lowercase_) -> int:
a__ =sum(
self.optimum[n]
* self.classes[n]
* self.kernel(self.observations[n] , lowercase_)
for n in range(len(self.classes)))
return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
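# Illustrative usage sketch (hypothetical: the fit/predict method names are inferred
# from the internal references to self.kernel, self.observations, self.classes,
# self.optimum and self.offset, and from the sklearn-style API the comments mention):
#
#     xs = [np.asarray([1.0, 1.0]), np.asarray([2.0, 2.0]),
#           np.asarray([-1.0, -1.0]), np.asarray([-2.0, -2.0])]
#     ys = np.asarray([1, 1, -1, -1])
#     svc = lowercase_(kernel="linear")    # the classifier class defined above
#     svc.fit(xs, ys)                      # solves the Wolfe dual for the multipliers
#     svc.predict(np.asarray([1.5, 1.5]))  # expected: 1 (positive side of the plane)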
"""simple docstring"""
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class A_ :
def __init__( self: Dict ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: int=13 ,__lowerCAmelCase: List[str]=30 ,__lowerCAmelCase: List[str]=2 ,__lowerCAmelCase: Dict=3 ,__lowerCAmelCase: Tuple=True ,__lowerCAmelCase: List[str]=True ,__lowerCAmelCase: Optional[Any]=32 ,__lowerCAmelCase: List[Any]=5 ,__lowerCAmelCase: int=4 ,__lowerCAmelCase: Optional[int]=37 ,__lowerCAmelCase: Dict="gelu" ,__lowerCAmelCase: str=0.1 ,__lowerCAmelCase: List[str]=0.1 ,__lowerCAmelCase: Optional[Any]=10 ,__lowerCAmelCase: List[str]=0.02 ,__lowerCAmelCase: Union[str, Any]=3 ,__lowerCAmelCase: Tuple=0.6 ,__lowerCAmelCase: Dict=None ,):
'''simple docstring'''
_lowerCamelCase : Optional[int] = parent
_lowerCamelCase : Any = batch_size
_lowerCamelCase : Any = image_size
_lowerCamelCase : List[str] = patch_size
_lowerCamelCase : Union[str, Any] = num_channels
_lowerCamelCase : List[str] = is_training
_lowerCamelCase : str = use_labels
_lowerCamelCase : List[Any] = hidden_size
_lowerCamelCase : Union[str, Any] = num_hidden_layers
_lowerCamelCase : Optional[int] = num_attention_heads
_lowerCamelCase : Optional[Any] = intermediate_size
_lowerCamelCase : Optional[int] = hidden_act
_lowerCamelCase : Union[str, Any] = hidden_dropout_prob
_lowerCamelCase : Any = attention_probs_dropout_prob
_lowerCamelCase : str = type_sequence_label_size
_lowerCamelCase : int = initializer_range
_lowerCamelCase : Dict = mask_ratio
_lowerCamelCase : List[Any] = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
_lowerCamelCase : str = (image_size // patch_size) ** 2
_lowerCamelCase : Dict = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
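        # e.g. with the defaults above (image_size=30, patch_size=2, mask_ratio=0.6):
        # num_patches = (30 // 2) ** 2 = 225 and seq_length = ceil(0.4 * 226) = 91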
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase : int = None
if self.use_labels:
_lowerCamelCase : Any = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_lowerCamelCase : str = self.get_config()
return config, pixel_values, labels
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
return ViTMAEConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=__lowerCAmelCase ,initializer_range=self.initializer_range ,mask_ratio=self.mask_ratio ,)
def _lowercase ( self: Any ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: Dict ):
'''simple docstring'''
_lowerCamelCase : Any = ViTMAEModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self: List[str] ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: int ,__lowerCAmelCase: Dict ):
'''simple docstring'''
_lowerCamelCase : List[Any] = ViTMAEForPreTraining(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Dict = model(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = (self.image_size // self.patch_size) ** 2
_lowerCamelCase : Optional[int] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
_lowerCamelCase : str = 1
_lowerCamelCase : Tuple = ViTMAEForPreTraining(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase )
_lowerCamelCase : Any = self.patch_size**2
self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : int = self.prepare_config_and_inputs()
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : int = config_and_inputs
_lowerCamelCase : Dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class A_ ( _a , _a , unittest.TestCase ):
lowerCAmelCase__ = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
lowerCAmelCase__ = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : int = ViTMAEModelTester(self )
_lowerCamelCase : List[str] = ConfigTester(self ,config_class=__lowerCAmelCase ,has_text_modality=__lowerCAmelCase ,hidden_size=37 )
def _lowercase ( self: List[str] ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
pass
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Optional[Any] = model_class(__lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
_lowerCamelCase : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCAmelCase ,nn.Linear ) )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Dict = model_class(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : Optional[Any] = [*signature.parameters.keys()]
_lowerCamelCase : Dict = ["pixel_values"]
self.assertListEqual(arg_names[:1] ,__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__lowerCAmelCase )
def _lowercase ( self: Any ,__lowerCAmelCase: int ,__lowerCAmelCase: Dict ,__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
np.random.seed(2 )
_lowerCamelCase : Optional[Any] = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
_lowerCamelCase : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_lowerCamelCase : Union[str, Any] = torch.from_numpy(__lowerCAmelCase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
_lowerCamelCase : Dict = pt_noise
super().check_pt_tf_models(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
    def _lowercase ( self: Dict ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            out_1 = outputs[0].cpu().numpy()
            out_1[np.isnan(out_1)] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                # Make sure we don't have nans
                out_2 = after_outputs[0].cpu().numpy()
                out_2[np.isnan(out_2)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def _lowercase ( self: str ):
'''simple docstring'''
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def _lowercase ( self: Tuple ):
'''simple docstring'''
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def _lowercase ( self: int ):
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _lowercase ( self: Dict ):
'''simple docstring'''
pass
@slow
def _lowercase ( self: Dict ):
'''simple docstring'''
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Optional[Any] = ViTMAEModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def lowerCamelCase_( ) -> str:
'''simple docstring'''
_lowerCamelCase : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class A_ ( unittest.TestCase ):
@cached_property
def _lowercase ( self: str ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
def _lowercase ( self: int ):
'''simple docstring'''
np.random.seed(2 )
_lowerCamelCase : List[str] = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" ).to(__lowerCAmelCase )
_lowerCamelCase : int = self.default_image_processor
_lowerCamelCase : int = prepare_img()
_lowerCamelCase : Tuple = image_processor(images=__lowerCAmelCase ,return_tensors="pt" ).to(__lowerCAmelCase )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
_lowerCamelCase : Tuple = ViTMAEConfig()
_lowerCamelCase : Union[str, Any] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
_lowerCamelCase : Optional[Any] = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
_lowerCamelCase : Dict = model(**__lowerCAmelCase ,noise=torch.from_numpy(__lowerCAmelCase ).to(device=__lowerCAmelCase ) )
# verify the logits
_lowerCamelCase : Any = torch.Size((1, 196, 768) )
self.assertEqual(outputs.logits.shape ,__lowerCAmelCase )
_lowerCamelCase : Tuple = torch.tensor(
[[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] ,expected_slice.to(__lowerCAmelCase ) ,atol=1e-4 ) )
| 46 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
UpperCAmelCase_ : str = None
UpperCAmelCase_ : List[str] = logging.get_logger(__name__)
UpperCAmelCase_ : List[str] = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase_ : List[str] = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
),
},
}
UpperCAmelCase_ : Dict = {
"facebook/nllb-large-en-ro": 1024,
"facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
UpperCAmelCase_ : Any = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class __A ( UpperCamelCase__ ):
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = ["""input_ids""", """attention_mask"""]
UpperCamelCase = NllbTokenizer
UpperCamelCase = []
UpperCamelCase = []
def __init__( self :Any , __snake_case :Union[str, Any]=None , __snake_case :Optional[int]=None , __snake_case :str="<s>" , __snake_case :Union[str, Any]="</s>" , __snake_case :Optional[int]="</s>" , __snake_case :Optional[Any]="<s>" , __snake_case :List[str]="<unk>" , __snake_case :List[str]="<pad>" , __snake_case :List[Any]="<mask>" , __snake_case :List[Any]=None , __snake_case :Any=None , __snake_case :Optional[Any]=None , __snake_case :Optional[Any]=False , **__snake_case :Optional[Any] , ):
'''simple docstring'''
__magic_name__ : Any =AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else mask_token
__magic_name__ : Any =legacy_behaviour
super().__init__(
vocab_file=__snake_case , tokenizer_file=__snake_case , bos_token=__snake_case , eos_token=__snake_case , sep_token=__snake_case , cls_token=__snake_case , unk_token=__snake_case , pad_token=__snake_case , mask_token=__snake_case , src_lang=__snake_case , tgt_lang=__snake_case , additional_special_tokens=__snake_case , legacy_behaviour=__snake_case , **__snake_case , )
__magic_name__ : Optional[int] =vocab_file
__magic_name__ : List[str] =False if not self.vocab_file else True
__magic_name__ : str =FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} )
__magic_name__ : Union[str, Any] ={
lang_code: self.convert_tokens_to_ids(__snake_case ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
__magic_name__ : Optional[Any] =src_lang if src_lang is not None else """eng_Latn"""
__magic_name__ : Union[str, Any] =self.convert_tokens_to_ids(self._src_lang )
__magic_name__ : Any =tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def A__ ( self :int ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def A__ ( self :str , __snake_case :str ):
'''simple docstring'''
__magic_name__ : str =new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def A__ ( self :List[Any] , __snake_case :List[int] , __snake_case :Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def A__ ( self :str , __snake_case :List[int] , __snake_case :Optional[List[int]] = None ):
'''simple docstring'''
__magic_name__ : Union[str, Any] =[self.sep_token_id]
__magic_name__ : List[str] =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def A__ ( self :List[Any] , __snake_case :int , __snake_case :str , __snake_case :Optional[str] , __snake_case :Optional[str] , **__snake_case :Any ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
__magic_name__ : str =src_lang
__magic_name__ : Any =self(__snake_case , add_special_tokens=__snake_case , return_tensors=__snake_case , **__snake_case )
__magic_name__ : Union[str, Any] =self.convert_tokens_to_ids(__snake_case )
__magic_name__ : Tuple =tgt_lang_id
return inputs
def A__ ( self :Optional[Any] , __snake_case :List[str] , __snake_case :str = "eng_Latn" , __snake_case :Optional[List[str]] = None , __snake_case :str = "fra_Latn" , **__snake_case :List[Any] , ):
'''simple docstring'''
__magic_name__ : Union[str, Any] =src_lang
__magic_name__ : List[Any] =tgt_lang
return super().prepare_seqaseq_batch(__snake_case , __snake_case , **__snake_case )
def A__ ( self :str ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def A__ ( self :Optional[int] ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def A__ ( self :Tuple , __snake_case :Any ):
'''simple docstring'''
__magic_name__ : Dict =self.convert_tokens_to_ids(__snake_case )
if self.legacy_behaviour:
__magic_name__ : Any =[]
__magic_name__ : str =[self.eos_token_id, self.cur_lang_code]
else:
__magic_name__ : Optional[int] =[self.cur_lang_code]
__magic_name__ : Tuple =[self.eos_token_id]
__magic_name__ : int =self.convert_ids_to_tokens(self.prefix_tokens )
__magic_name__ : Dict =self.convert_ids_to_tokens(self.suffix_tokens )
__magic_name__ : Union[str, Any] =processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def A__ ( self :Optional[int] , __snake_case :str ):
'''simple docstring'''
__magic_name__ : Optional[Any] =self.convert_tokens_to_ids(__snake_case )
if self.legacy_behaviour:
__magic_name__ : Any =[]
__magic_name__ : Optional[Any] =[self.eos_token_id, self.cur_lang_code]
else:
__magic_name__ : List[Any] =[self.cur_lang_code]
__magic_name__ : Dict =[self.eos_token_id]
__magic_name__ : Dict =self.convert_ids_to_tokens(self.prefix_tokens )
__magic_name__ : List[str] =self.convert_ids_to_tokens(self.suffix_tokens )
__magic_name__ : Optional[Any] =processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def A__ ( self :Union[str, Any] , __snake_case :str , __snake_case :Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(__snake_case ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory." )
return
__magic_name__ : str =os.path.join(
__snake_case , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__snake_case ):
copyfile(self.vocab_file , __snake_case )
return (out_vocab_file,)
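# Hedged usage sketch (standard Hugging Face tokenizer API; downloading the
# checkpoint is assumed to work in your environment):
#
#     from transformers import AutoTokenizer
#     tok = AutoTokenizer.from_pretrained(
#         "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#     )
#     batch = tok("Hello world", return_tensors="pt")
#     # with the default (non-legacy) behaviour, input_ids are prefixed with the
#     # eng_Latn language code and suffixed with </s>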
| 21 |
"""simple docstring"""
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Linear search over array[left:right]; returns the index or -1."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search; returns the index of target or -1."""
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search over array[left..right]; returns the index or -1."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
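# Minimal usage sketch (values assume the default precision above; such short
# lists fall straight through to lin_search):
#
#     example = [1, 3, 5, 7, 9, 11, 13]
#     ite_ternary_search(example, 13)                      # -> 6
#     rec_ternary_search(0, len(example) - 1, example, 5)  # -> 2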
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(f"Iterative search: {target} found at positions: {result1}")
        print(f"Recursive search: {target} found at positions: {result2}")
else:
print('''Not found''')
| 46 | 0 |
'''simple docstring'''
def interpolation_search(sorted_collection, item):
    """Search `item` in an ascending sorted collection; return its index or None."""
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )
        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None
        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
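# Worked probe for the interpolation formula above (data from the __main__ block):
#
#     data = [10, 30, 40, 45, 50, 66, 77, 93]
#     # first probe for item=66: 0 + (66 - 10) * (7 - 0) // (93 - 10) = 4
#     # data[4] == 50 < 66, so left moves to 5 and the next probe lands on index 5
#     interpolation_search(data, 66)  # -> 5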
def interpolation_search_by_recursion(sorted_collection, item, left, right):
    """Recursive variant of interpolation_search over sorted_collection[left..right]."""
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None
    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )
    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None
    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, left)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                sorted_collection, item, left, point - 1
            )
        else:
            return interpolation_search_by_recursion(
                sorted_collection, item, point + 1, right
            )


def __assert_sorted(collection):
    """Raise ValueError if the collection is not ascending sorted."""
    if collection != sorted(collection):
        raise ValueError('''Collection must be ascending sorted''')
    return True
if __name__ == "__main__":
import sys
    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    debug = 0
    if debug == 1:
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit('Sequence must be ascending sorted to apply interpolation search')

    target = 67
    result = interpolation_search(collection, target)
if result is not None:
print(F'''{target} found at positions: {result}''')
else:
print('Not found')
| 22 |
"""simple docstring"""
def solution(n: int = 100) -> int:
    """Count the distinct terms a**b for 2 <= a <= n and 2 <= b <= n."""
    collect_powers = set()

    n = n + 1  # maximum limit

    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)
if __name__ == "__main__":
print('''Number of terms ''', solution(int(str(input()).strip())))
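    # Sanity check mirroring the Project Euler 29 statement: for n = 5 there are
    # 16 products a**b with 2 <= a, b <= 5 and one duplicate (2**4 == 4**2 == 16),
    # so solution(5) == 15.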
| 46 | 0 |
from typing import List
from .keymap import KEYMAP, get_character
def mark(key: str):
    """Mark the function with the key code so it can be handled in the register."""

    def decorator(func):
        handle = getattr(func, 'handle_key', [])
        handle += [key]
        setattr(func, 'handle_key', handle)
        return func

    return decorator


def mark_multiple(*keys: List[str]):
    """Mark the function with the key codes so it can be handled in the register."""

    def decorator(func):
        handle = getattr(func, 'handle_key', [])
        handle += keys
        setattr(func, 'handle_key', handle)
        return func

    return decorator


class KeyHandler(type):
    """Metaclass that adds the key handlers to the class."""

    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, 'key_handler'):
            setattr(new_cls, 'key_handler', {})
        setattr(new_cls, 'handle_input', KeyHandler.handle_input)
        for value in attrs.values():
            handled_keys = getattr(value, 'handle_key', [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Find and return the handler for the key pressed by the user."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """Attach the KeyHandler metaclass to the class."""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
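# Hedged usage sketch (terminal key reading comes from the sibling keymap module,
# so this only shows the wiring):
#
#     @register
#     class Menu:
#         @mark("q")
#         def quit(self):
#             return "quit"
#
#     # Menu.handle_input(Menu) reads one key press; "q" dispatches Menu.quit.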
| 23 |
"""simple docstring"""
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
# TODO Update this
_lowerCAmelCase : Optional[Any] = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class A_ ( _a ):
lowerCAmelCase__ = 'esm'
def __init__( self: str ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: str=None ,__lowerCAmelCase: Optional[int]=768 ,__lowerCAmelCase: Any=12 ,__lowerCAmelCase: str=12 ,__lowerCAmelCase: List[Any]=3_072 ,__lowerCAmelCase: int=0.1 ,__lowerCAmelCase: int=0.1 ,__lowerCAmelCase: List[Any]=1_026 ,__lowerCAmelCase: Optional[Any]=0.02 ,__lowerCAmelCase: Dict=1e-12 ,__lowerCAmelCase: Dict="absolute" ,__lowerCAmelCase: List[Any]=True ,__lowerCAmelCase: Optional[Any]=None ,__lowerCAmelCase: Union[str, Any]=False ,__lowerCAmelCase: str=False ,__lowerCAmelCase: List[Any]=None ,__lowerCAmelCase: Union[str, Any]=None ,**__lowerCAmelCase: int ,):
'''simple docstring'''
super().__init__(pad_token_id=__lowerCAmelCase ,mask_token_id=__lowerCAmelCase ,**__lowerCAmelCase )
_lowerCamelCase : List[Any] = vocab_size
_lowerCamelCase : Union[str, Any] = hidden_size
_lowerCamelCase : Optional[Any] = num_hidden_layers
_lowerCamelCase : str = num_attention_heads
_lowerCamelCase : int = intermediate_size
_lowerCamelCase : Tuple = hidden_dropout_prob
_lowerCamelCase : Any = attention_probs_dropout_prob
_lowerCamelCase : int = max_position_embeddings
_lowerCamelCase : int = initializer_range
_lowerCamelCase : Union[str, Any] = layer_norm_eps
_lowerCamelCase : Optional[int] = position_embedding_type
_lowerCamelCase : str = use_cache
_lowerCamelCase : Union[str, Any] = emb_layer_norm_before
_lowerCamelCase : Tuple = token_dropout
_lowerCamelCase : Dict = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("No esmfold_config supplied for folding model, using default values." )
_lowerCamelCase : Dict = EsmFoldConfig()
elif isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : List[Any] = EsmFoldConfig(**__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = esmfold_config
if vocab_list is None:
logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!" )
_lowerCamelCase : List[str] = get_default_vocab_list()
else:
_lowerCamelCase : Optional[Any] = vocab_list
else:
_lowerCamelCase : List[str] = None
_lowerCamelCase : Dict = None
if self.esmfold_config is not None and getattr(self.esmfold_config ,"use_esm_attn_map" ,__lowerCAmelCase ):
raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!" )
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : List[Any] = super().to_dict()
if isinstance(self.esmfold_config ,__lowerCAmelCase ):
_lowerCamelCase : Optional[int] = self.esmfold_config.to_dict()
return output
@dataclass
class A_ :
lowerCAmelCase__ = None
lowerCAmelCase__ = True
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = 0
lowerCAmelCase__ = True
lowerCAmelCase__ = False
lowerCAmelCase__ = 1_2_8
lowerCAmelCase__ = None
def _lowercase ( self: Dict ):
'''simple docstring'''
if self.trunk is None:
_lowerCamelCase : Optional[int] = TrunkConfig()
elif isinstance(self.trunk ,__lowerCAmelCase ):
_lowerCamelCase : Union[str, Any] = TrunkConfig(**self.trunk )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : Dict = asdict(self )
_lowerCamelCase : str = self.trunk.to_dict()
return output
@dataclass
class A_ :
lowerCAmelCase__ = 4_8
lowerCAmelCase__ = 1_0_2_4
lowerCAmelCase__ = 1_2_8
lowerCAmelCase__ = 3_2
lowerCAmelCase__ = 3_2
lowerCAmelCase__ = 3_2
lowerCAmelCase__ = 0
lowerCAmelCase__ = 0
lowerCAmelCase__ = False
lowerCAmelCase__ = 4
lowerCAmelCase__ = 1_2_8
lowerCAmelCase__ = None
def _lowercase ( self: Any ):
'''simple docstring'''
if self.structure_module is None:
_lowerCamelCase : Tuple = StructureModuleConfig()
elif isinstance(self.structure_module ,__lowerCAmelCase ):
_lowerCamelCase : str = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(F"""`max_recycles` should be positive, got {self.max_recycles}.""" )
if self.sequence_state_dim % self.sequence_state_dim != 0:
raise ValueError(
"`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got"
F""" {self.sequence_state_dim} and {self.sequence_state_dim}.""" )
if self.pairwise_state_dim % self.pairwise_state_dim != 0:
raise ValueError(
"`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got"
F""" {self.pairwise_state_dim} and {self.pairwise_state_dim}.""" )
_lowerCamelCase : Optional[Any] = self.sequence_state_dim // self.sequence_head_width
_lowerCamelCase : Optional[int] = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
F""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
F""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" )
if self.dropout >= 0.4:
raise ValueError(F"""`dropout` should not be greater than 0.4, got {self.dropout}.""" )
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : Dict = asdict(self )
_lowerCamelCase : Optional[int] = self.structure_module.to_dict()
return output
@dataclass
class A_ :
lowerCAmelCase__ = 3_8_4
lowerCAmelCase__ = 1_2_8
lowerCAmelCase__ = 1_6
lowerCAmelCase__ = 1_2_8
lowerCAmelCase__ = 1_2
lowerCAmelCase__ = 4
lowerCAmelCase__ = 8
lowerCAmelCase__ = 0.1
lowerCAmelCase__ = 8
lowerCAmelCase__ = 1
lowerCAmelCase__ = 2
lowerCAmelCase__ = 7
lowerCAmelCase__ = 1_0
lowerCAmelCase__ = 1E-8
lowerCAmelCase__ = 1E5
def _lowercase ( self: Any ):
'''simple docstring'''
return asdict(self )
def lowerCamelCase_( ) -> int:
'''simple docstring'''
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
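# Minimal sketch (names assume the upstream Hugging Face ESM port, where these
# classes are EsmConfig / EsmFoldConfig): a folding-model config falls back to a
# default EsmFoldConfig and the ESM-2 vocabulary above when none are supplied.
#
#     config = EsmConfig(vocab_size=33, is_folding_model=True)
#     assert config.vocab_list == get_default_vocab_list()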
| 46 | 0 |
'''simple docstring'''
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
UpperCAmelCase_ : Tuple = '''<<<<<<< This should probably be modified because it mentions: '''
UpperCAmelCase_ : Optional[int] = '''=======
>>>>>>>
'''
UpperCAmelCase_ : Tuple = [
'''TextEncoderConfig''',
'''ByteTextEncoder''',
'''SubwordTextEncoder''',
'''encoder_config''',
'''maybe_build_from_corpus''',
'''manual_dir''',
]
UpperCAmelCase_ : Optional[int] = [
# (pattern, replacement)
# Order is important here for some replacements
(R'''tfds\.core''', R'''datasets'''),
(R'''tf\.io\.gfile\.GFile''', R'''open'''),
(R'''tf\.([\w\d]+)''', R'''datasets.Value(\'\1\')'''),
(R'''tfds\.features\.Text\(\)''', R'''datasets.Value(\'string\')'''),
(R'''tfds\.features\.Text\(''', R'''datasets.Value(\'string\'),'''),
(R'''features\s*=\s*tfds.features.FeaturesDict\(''', R'''features=datasets.Features('''),
(R'''tfds\.features\.FeaturesDict\(''', R'''dict('''),
(R'''The TensorFlow Datasets Authors''', R'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
(R'''tfds\.''', R'''datasets.'''),
(R'''dl_manager\.manual_dir''', R'''self.config.data_dir'''),
(R'''self\.builder_config''', R'''self.config'''),
]
def _UpperCamelCase (_lowerCamelCase : Namespace )-> int:
'''simple docstring'''
return ConvertCommand(args.tfds_path , args.datasets_directory )
class lowerCAmelCase ( __lowerCAmelCase):
@staticmethod
def lowerCAmelCase ( __SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
__snake_case = parser.add_parser(
'''convert''' , help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''' , )
train_parser.add_argument(
'''--tfds_path''' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''' , )
train_parser.add_argument(
'''--datasets_directory''' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help='''Path to the HuggingFace Datasets folder.''' )
train_parser.set_defaults(func=__SCREAMING_SNAKE_CASE )
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
__snake_case = get_logger('''datasets-cli/converting''' )
__snake_case = tfds_path
__snake_case = datasets_directory
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
if os.path.isdir(self._tfds_path ):
__snake_case = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
__snake_case = os.path.dirname(self._tfds_path )
else:
raise ValueError('''--tfds_path is neither a directory nor a file. Please check path.''' )
__snake_case = os.path.abspath(self._datasets_directory )
self._logger.info(F'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' )
__snake_case = []
__snake_case = []
__snake_case = {}
if os.path.isdir(self._tfds_path ):
__snake_case = os.listdir(__SCREAMING_SNAKE_CASE )
else:
__snake_case = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(F'''Looking at file {f_name}''' )
__snake_case = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if not os.path.isfile(__SCREAMING_SNAKE_CASE ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info('''Skipping file''' )
continue
with open(__SCREAMING_SNAKE_CASE , encoding='''utf-8''' ) as f:
__snake_case = f.readlines()
__snake_case = []
__snake_case = False
__snake_case = False
__snake_case = []
for line in lines:
__snake_case = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
__snake_case = '''import datasets\n'''
elif "import tensorflow" in out_line:
# order is important here
__snake_case = ''''''
continue
elif "from absl import logging" in out_line:
__snake_case = '''from datasets import logging\n'''
elif "getLogger" in out_line:
__snake_case = out_line.replace('''getLogger''' , '''get_logger''' )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
__snake_case = True
__snake_case = list(filter(lambda __SCREAMING_SNAKE_CASE : e in out_line , __SCREAMING_SNAKE_CASE ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(__SCREAMING_SNAKE_CASE ) + '''\n''' )
out_lines.append(__SCREAMING_SNAKE_CASE )
out_lines.append(__SCREAMING_SNAKE_CASE )
continue
else:
for pattern, replacement in TO_CONVERT:
__snake_case = re.sub(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
__snake_case = re.match(r'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''' , __SCREAMING_SNAKE_CASE )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) )
__snake_case = '''from . import ''' + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(F'''Error converting {out_line.strip()}''' )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
__snake_case = True
out_lines.append(__SCREAMING_SNAKE_CASE )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
__snake_case = f_name.replace('''.py''' , '''''' )
__snake_case = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
self._logger.info(F'''Adding directory {output_dir}''' )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(__SCREAMING_SNAKE_CASE )
if needs_manual_update:
with_manual_update.append(__SCREAMING_SNAKE_CASE )
with open(__SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as f:
f.writelines(__SCREAMING_SNAKE_CASE )
self._logger.info(F'''Converted in {output_file}''' )
for utils_file in utils_files:
try:
__snake_case = os.path.basename(__SCREAMING_SNAKE_CASE )
__snake_case = imports_to_builder_map[f_name.replace('''.py''' , '''''' )]
self._logger.info(F'''Moving {dest_folder} to {utils_file}''' )
shutil.copy(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
except KeyError:
self._logger.error(F'''Cannot find destination folder for {utils_file}. Please copy manually.''' )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
F'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
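# Hedged CLI sketch (paths are placeholders): the command above is normally
# reached through the datasets entry point, e.g.
#
#     datasets-cli convert \
#         --tfds_path ./my_tfds_dataset.py \
#         --datasets_directory ./converted_datasets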
| 24 |
"""simple docstring"""
import re
def dna(dna: str) -> str:
    """Return the complementary strand of a DNA sequence."""
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")

    return dna.translate(dna.maketrans("ATCG", "TAGC"))
if __name__ == "__main__":
import doctest
doctest.testmod()
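    # Quick illustration: every base maps to its Watson-Crick complement, so
    # dna("GCTA") returns "CGAT", while a strand containing non-ATCG characters
    # raises ValueError("Invalid Strand").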
| 46 | 0 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
a_ = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'encoder.layer_norm_for_extract': 'layer_norm_for_extract',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'label_embs_concat': 'label_embeddings_concat',
'mask_emb': 'masked_spec_embed',
'spk_proj': 'speaker_proj',
}
a_ = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'label_embeddings_concat',
'speaker_proj',
'layer_norm_for_extract',
]
def lowerCamelCase__ ( _a , _a , _a , _a , _a):
for attribute in key.split("."):
SCREAMING_SNAKE_CASE : List[Any] = getattr(_a , _a)
if weight_type is not None:
SCREAMING_SNAKE_CASE : str = getattr(_a , _a).shape
else:
SCREAMING_SNAKE_CASE : Dict = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}")
if weight_type == "weight":
SCREAMING_SNAKE_CASE : Any = value
elif weight_type == "weight_g":
SCREAMING_SNAKE_CASE : List[Any] = value
elif weight_type == "weight_v":
SCREAMING_SNAKE_CASE : List[Any] = value
elif weight_type == "bias":
SCREAMING_SNAKE_CASE : List[Any] = value
else:
SCREAMING_SNAKE_CASE : Any = value
logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def lowerCamelCase__ ( _a , _a):
SCREAMING_SNAKE_CASE : int = []
SCREAMING_SNAKE_CASE : int = fairseq_model.state_dict()
SCREAMING_SNAKE_CASE : int = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
SCREAMING_SNAKE_CASE : Optional[int] = False
if "conv_layers" in name:
load_conv_layer(
_a , _a , _a , _a , hf_model.config.feat_extract_norm == "group" , )
SCREAMING_SNAKE_CASE : Optional[int] = True
else:
for key, mapped_key in MAPPING.items():
SCREAMING_SNAKE_CASE : str = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
# special case since naming is very similar
continue
SCREAMING_SNAKE_CASE : Optional[int] = True
if "*" in mapped_key:
SCREAMING_SNAKE_CASE : Union[str, Any] = name.split(_a)[0].split(".")[-2]
SCREAMING_SNAKE_CASE : Dict = mapped_key.replace("*" , _a)
if "weight_g" in name:
SCREAMING_SNAKE_CASE : Any = "weight_g"
elif "weight_v" in name:
SCREAMING_SNAKE_CASE : List[str] = "weight_v"
elif "bias" in name:
SCREAMING_SNAKE_CASE : str = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
SCREAMING_SNAKE_CASE : Any = "weight"
else:
SCREAMING_SNAKE_CASE : Optional[Any] = None
set_recursively(_a , _a , _a , _a , _a)
continue
if not is_used:
unused_weights.append(_a)
logger.warning(f"Unused weights: {unused_weights}")
def lowerCamelCase__ ( _a , _a , _a , _a , _a):
SCREAMING_SNAKE_CASE : Optional[Any] = full_name.split("conv_layers.")[-1]
SCREAMING_SNAKE_CASE : Optional[Any] = name.split(".")
SCREAMING_SNAKE_CASE : Any = int(items[0])
SCREAMING_SNAKE_CASE : Any = int(items[1])
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.")
SCREAMING_SNAKE_CASE : Tuple = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.")
SCREAMING_SNAKE_CASE : int = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.")
SCREAMING_SNAKE_CASE : Tuple = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.")
SCREAMING_SNAKE_CASE : Tuple = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
else:
unused_weights.append(_a)
@torch.no_grad()
def lowerCamelCase__ ( _a , _a , _a=None , _a=None , _a=True):
if config_path is not None:
SCREAMING_SNAKE_CASE : Tuple = UniSpeechSatConfig.from_pretrained(_a)
else:
SCREAMING_SNAKE_CASE : List[Any] = UniSpeechSatConfig()
SCREAMING_SNAKE_CASE : List[str] = ""
if is_finetuned:
SCREAMING_SNAKE_CASE : Dict = UniSpeechSatForCTC(_a)
else:
SCREAMING_SNAKE_CASE : str = UniSpeechSatForPreTraining(_a)
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/")[:-1])})
SCREAMING_SNAKE_CASE : Union[str, Any] = model[0].eval()
recursively_load_weights(_a , _a)
hf_wavavec.save_pretrained(_a)
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
a_ = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
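    # Hedged invocation sketch (the script filename and paths are placeholders):
    #
    #     python convert_unispeech_sat_checkpoint.py \
    #         --checkpoint_path ./unispeech_sat.pt \
    #         --dict_path ./dict.ltr.txt \
    #         --pytorch_dump_folder_path ./unispeech-sat-hf \
    #         --not_finetuned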
| 25 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : str = logging.get_logger(__name__)
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=False ) -> List[Any]:
'''simple docstring'''
_lowerCamelCase : int = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_lowerCamelCase : List[str] = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ) -> Optional[int]:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
_lowerCamelCase : Tuple = ""
else:
_lowerCamelCase : str = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowerCamelCase : Tuple = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
_lowerCamelCase : Dict = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase : Union[str, Any] = in_proj_weight[
: config.hidden_size, :
]
_lowerCamelCase : Tuple = in_proj_bias[: config.hidden_size]
_lowerCamelCase : Tuple = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowerCamelCase : Optional[int] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowerCamelCase : Tuple = in_proj_weight[
-config.hidden_size :, :
]
_lowerCamelCase : Optional[Any] = in_proj_bias[-config.hidden_size :]
def lowerCamelCase_( _lowerCamelCase ) -> int:
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Any:
'''simple docstring'''
_lowerCamelCase : Any = dct.pop(_lowerCamelCase )
_lowerCamelCase : Dict = val
def lowerCamelCase_( ) -> Optional[int]:
'''simple docstring'''
_lowerCamelCase : Optional[int] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowerCamelCase : List[str] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return im
@torch.no_grad()
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=True ) -> str:
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = ViTConfig()
# patch_size
if model_name[-1] == "8":
_lowerCamelCase : str = 8
# set labels if required
if not base_model:
_lowerCamelCase : str = 1000
_lowerCamelCase : Any = "huggingface/label-files"
_lowerCamelCase : Union[str, Any] = "imagenet-1k-id2label.json"
_lowerCamelCase : Optional[int] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) , "r" ) )
_lowerCamelCase : str = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
_lowerCamelCase : Optional[Any] = idalabel
_lowerCamelCase : Optional[Any] = {v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
_lowerCamelCase : int = 384
_lowerCamelCase : str = 1536
_lowerCamelCase : List[str] = 12
_lowerCamelCase : Optional[int] = 6
# load original model from torch hub
_lowerCamelCase : Union[str, Any] = torch.hub.load("facebookresearch/dino:main" , _lowerCamelCase )
original_model.eval()
# load state_dict of original model, remove and rename some keys
_lowerCamelCase : List[str] = original_model.state_dict()
if base_model:
remove_classification_head_(_lowerCamelCase )
_lowerCamelCase : Tuple = create_rename_keys(_lowerCamelCase , base_model=_lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
read_in_q_k_v(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# load HuggingFace model
if base_model:
_lowerCamelCase : Optional[Any] = ViTModel(_lowerCamelCase , add_pooling_layer=_lowerCamelCase ).eval()
else:
_lowerCamelCase : Union[str, Any] = ViTForImageClassification(_lowerCamelCase ).eval()
model.load_state_dict(_lowerCamelCase )
# Check outputs on an image, prepared by ViTImageProcessor
_lowerCamelCase : Tuple = ViTImageProcessor()
_lowerCamelCase : List[Any] = image_processor(images=prepare_img() , return_tensors="pt" )
_lowerCamelCase : Dict = encoding["pixel_values"]
_lowerCamelCase : int = model(_lowerCamelCase )
if base_model:
_lowerCamelCase : List[str] = original_model(_lowerCamelCase )
assert torch.allclose(_lowerCamelCase , outputs.last_hidden_state[:, 0, :] , atol=1e-1 )
else:
_lowerCamelCase : Tuple = original_model(_lowerCamelCase )
assert logits.shape == outputs.logits.shape
assert torch.allclose(_lowerCamelCase , outputs.logits , atol=1e-3 )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowerCamelCase )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
_lowerCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''dino_vitb16''',
type=str,
help='''Name of the model trained with DINO you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--base_model''',
action='''store_true''',
help='''Whether to only convert the base model (no projection head weights).''',
)
parser.set_defaults(base_model=True)
_lowerCAmelCase : List[Any] = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
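
# Editor's note: a minimal reload sketch, not part of the original script. It assumes
# the conversion above already wrote a checkpoint; the folder name is illustrative.
def _demo_load_converted(folder="./dino_vitb16"):
    from transformers import ViTImageProcessor, ViTModel

    model = ViTModel.from_pretrained(folder, add_pooling_layer=False)
    image_processor = ViTImageProcessor.from_pretrained(folder)
    inputs = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**inputs)
    print(outputs.last_hidden_state.shape)  # (1, num_patches + 1, hidden_size)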
| 46 | 0 |
'''simple docstring'''
import sys
from collections import defaultdict
class Heap:
    def __init__(self) -> None:
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                # swap heap entries and the vertices stored alongside them
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1
                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start]))
                self.set_position(positions[start], temp)
                self.top_to_bottom(heap, smallest_child, size, positions)

    # Update function if value of any node in min-heap decreases
    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    """Return the edges of a minimum spanning tree found with Prim's algorithm."""
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions)
                    nbr_tv[neighbor] = vertex
    return tree_edges


if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
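
# Editor's note: a non-interactive sketch, not part of the original module. The graph
# below is illustrative; vertices must be numbered 0..n-1 as the algorithm assumes.
def _demo_prims():
    example = defaultdict(list)
    for u, v, w in [(0, 1, 1), (0, 2, 3), (1, 2, 1), (1, 3, 4), (2, 3, 1)]:
        example[u].append([v, w])
        example[v].append([u, w])
    # Each returned pair is (tree_vertex, newly_added_vertex).
    print(prisms_algorithm(example))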
| 26 |
"""simple docstring"""
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    '''simple docstring'''
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    '''simple docstring'''
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
class ClassificationFunction(ExplicitEnum):
    SIGMOID = 'sigmoid'
    SOFTMAX = 'softmax'
    NONE = 'none'


@add_end_docstrings(
    PIPELINE_INIT_ARGS, r'\n return_all_scores (`bool`, *optional*, defaults to `False`):\n Whether to return all prediction scores or just the one of the predicted class.\n function_to_apply (`str`, *optional*, defaults to `"default"`):\n The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:\n\n - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model\n has several labels, will apply the softmax function on the output.\n - `"sigmoid"`: Applies the sigmoid function on the output.\n - `"softmax"`: Applies the softmax function on the output.\n - `"none"`: Does not apply any function on the output.\n ' , )
class TextClassificationPipeline(Pipeline):
    '''simple docstring'''

    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )

    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        preprocess_params = tokenizer_kwargs
        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores
        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.", UserWarning, )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1
        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]
        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params

    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result

    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs )
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                " dictionary `{\"text\": \"My text\", \"text_pair\": \"My pair\"}` in order to send a text pair." )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE
        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()
        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}")
        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}
        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
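
# Editor's note: a usage sketch, not part of the original module. In practice the class
# is reached through the `pipeline` factory; the model name is illustrative.
def _demo_text_classification():
    from transformers import pipeline

    classifier = pipeline("text-classification", model="distilbert-base-uncased-finetuned-sst-2-english")
    print(classifier("This movie was great!"))               # [{'label': 'POSITIVE', 'score': ...}]
    print(classifier("This movie was great!", top_k=None))   # scores for every label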
| 46 | 0 |
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
__A : str = logging.get_logger(__name__)
class OwlViTFeatureExtractor(OwlViTImageProcessor):
    '''simple docstring'''

    def __init__(self, *args, **kwargs):
        warnings.warn(
            'The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use OwlViTImageProcessor instead.', FutureWarning, )
        super().__init__(*args, **kwargs)
| 27 |
"""simple docstring"""
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = '''\
Text data.
Second line of data.'''

FILE_PATH = "file"


@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH


@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected


def test_cached_path_local(text_file):
    # absolute path
    text_file = str(Path(text_file).resolve())
    assert cached_path(text_file) == text_file
    # relative path
    text_file = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file) == text_file


def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)


def test_get_from_cache_fsspec(tmpfs_file):
    output_path = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_path) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
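
# Editor's note: a sketch of the behavior exercised above, not part of the original
# test module. The archive path is hypothetical; extraction only happens when
# `extract_compressed_file=True`.
def _demo_cached_path(archive="data.txt.gz", cache_dir="/tmp/datasets_cache"):
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted = cached_path(archive, download_config=download_config)
    print(extracted)  # path of the extracted text file inside the cache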
| 46 | 0 |
'''simple docstring'''
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    """simple docstring"""
    return x + 2


class PythonInterpreterTester(unittest.TestCase):
    '''simple docstring'''

    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})

    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self):
        code = "if x <= 3:\n    y = 2\nelse:\n    y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n    x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
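
# Editor's note: a standalone sketch, not part of the original test module, showing the
# interpreter contract the tests rely on: `evaluate` runs restricted Python against a
# whitelist of tools and mutates the provided state dict.
def _demo_evaluate():
    state = {"x": 3}
    result = evaluate("y = add_two(x)", {"add_two": add_two}, state=state)
    print(result, state)  # 5 {'x': 3, 'y': 5}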
| 28 |
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    '''simple docstring'''
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
if __name__ == "__main__":
fire.Fire(convert)
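
# Editor's note: a programmatic sketch equivalent to the CLI above, not part of the
# original script. Paths are illustrative.
def _demo_convert():
    convert("pytorch_model.bin", map_location="cpu", save_path="pytorch_model_fp16.bin")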
| 46 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
A_ = logging.get_logger(__name__)
class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            '''The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use OwlViTImageProcessor instead.''', FutureWarning, )
        super().__init__(*args, **kwargs)
| 29 |
"""simple docstring"""
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
SAMPLE_ROBERTA_CONFIG = get_tests_dir('''fixtures/dummy-config.json''')


class AutoConfigTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_module_spec(self):
        self.assertIsNotNone(transformers.models.auto.__spec__)
        self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto"))

    def test_config_from_model_shortcut(self):
        config = AutoConfig.from_pretrained("bert-base-uncased")
        self.assertIsInstance(config, BertConfig)

    def test_config_model_type_from_local_file(self):
        config = AutoConfig.from_pretrained(SAMPLE_ROBERTA_CONFIG)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_model_type_from_model_identifier(self):
        config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_for_model_str(self):
        config = AutoConfig.for_model("roberta")
        self.assertIsInstance(config, RobertaConfig)

    def test_pattern_matching_fallback(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            # This model name contains bert and roberta, but roberta ends up being picked.
            folder = os.path.join(tmp_dir, "fake-roberta")
            os.makedirs(folder, exist_ok=True)
            with open(os.path.join(folder, "config.json"), "w") as f:
                f.write(json.dumps({}))
            config = AutoConfig.from_pretrained(folder)
            self.assertEqual(type(config), RobertaConfig)

    def test_new_config_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            # Wrong model type will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("model", CustomConfig)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("bert", BertConfig)

            # Now that the config is registered, it can be used as any other config with the auto-API
            config = CustomConfig()
            with tempfile.TemporaryDirectory() as tmp_dir:
                config.save_pretrained(tmp_dir)
                new_config = AutoConfig.from_pretrained(tmp_dir)
                self.assertIsInstance(new_config, CustomConfig)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoConfig.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_configuration_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.",
        ):
            _ = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo")

    def test_from_pretrained_dynamic_config(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)

        config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
        self.assertEqual(config.__class__.__name__, "NewModelConfig")

        # Test config can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            reloaded_config = AutoConfig.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_config.__class__.__name__, "NewModelConfig")

    def test_from_pretrained_dynamic_config_conflict(self):
        class NewModelConfigLocal(BertConfig):
            model_type = "new-model"

        try:
            AutoConfig.register("new-model", NewModelConfigLocal)
            # If remote code is not set, the default is to use local
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote code is disabled, we load the local one.
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote is enabled, we load from the Hub
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
            self.assertEqual(config.__class__.__name__, "NewModelConfig")
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
| 46 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class GLPNConfig(PretrainedConfig):
    """simple docstring"""

    model_type = '''glpn'''

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=64,
        max_depth=10,
        head_in_index=-1,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
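
# Editor's note: a usage sketch, not part of the original module, showing how the
# defaults can be overridden at construction time.
def _demo_glpn_config():
    config = GLPNConfig(decoder_hidden_size=32, drop_path_rate=0.2)
    print(config.model_type, config.hidden_sizes, config.decoder_hidden_size)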
| 30 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_gpt_sw3"] = ['''GPTSw3Tokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 46 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    'configuration_graphormer': ['GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GraphormerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_graphormer'] = [
        'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'GraphormerForGraphClassification',
        'GraphormerModel',
        'GraphormerPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 31 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : Tuple = logging.get_logger(__name__)
def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    '''simple docstring'''
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""transformer.blocks.{i}.norm1.weight""", F"""vilt.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm1.bias""", F"""vilt.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.weight""", F"""vilt.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.bias""", F"""vilt.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.weight""", F"""vilt.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.bias""", F"""vilt.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.mlp.fc1.weight""", F"""vilt.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc1.bias""", F"""vilt.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.weight""", F"""vilt.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.bias""", F"""vilt.encoder.layer.{i}.output.dense.bias""") )
# embeddings
rename_keys.extend(
[
# text embeddings
("text_embeddings.word_embeddings.weight", "vilt.embeddings.text_embeddings.word_embeddings.weight"),
(
"text_embeddings.position_embeddings.weight",
"vilt.embeddings.text_embeddings.position_embeddings.weight",
),
("text_embeddings.position_ids", "vilt.embeddings.text_embeddings.position_ids"),
(
"text_embeddings.token_type_embeddings.weight",
"vilt.embeddings.text_embeddings.token_type_embeddings.weight",
),
("text_embeddings.LayerNorm.weight", "vilt.embeddings.text_embeddings.LayerNorm.weight"),
("text_embeddings.LayerNorm.bias", "vilt.embeddings.text_embeddings.LayerNorm.bias"),
# patch embeddings
("transformer.cls_token", "vilt.embeddings.cls_token"),
("transformer.patch_embed.proj.weight", "vilt.embeddings.patch_embeddings.projection.weight"),
("transformer.patch_embed.proj.bias", "vilt.embeddings.patch_embeddings.projection.bias"),
("transformer.pos_embed", "vilt.embeddings.position_embeddings"),
# token type embeddings
("token_type_embeddings.weight", "vilt.embeddings.token_type_embeddings.weight"),
] )
# final layernorm + pooler
rename_keys.extend(
[
("transformer.norm.weight", "vilt.layernorm.weight"),
("transformer.norm.bias", "vilt.layernorm.bias"),
("pooler.dense.weight", "vilt.pooler.dense.weight"),
("pooler.dense.bias", "vilt.pooler.dense.bias"),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
("vqa_classifier.0.weight", "classifier.0.weight"),
("vqa_classifier.0.bias", "classifier.0.bias"),
("vqa_classifier.1.weight", "classifier.1.weight"),
("vqa_classifier.1.bias", "classifier.1.bias"),
("vqa_classifier.3.weight", "classifier.3.weight"),
("vqa_classifier.3.bias", "classifier.3.bias"),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
("nlvr2_classifier.0.weight", "classifier.0.weight"),
("nlvr2_classifier.0.bias", "classifier.0.bias"),
("nlvr2_classifier.1.weight", "classifier.1.weight"),
("nlvr2_classifier.1.bias", "classifier.1.bias"),
("nlvr2_classifier.3.weight", "classifier.3.weight"),
("nlvr2_classifier.3.bias", "classifier.3.bias"),
] )
else:
pass
return rename_keys
def read_in_q_k_v(state_dict, config):
    '''simple docstring'''
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    '''simple docstring'''
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    '''simple docstring'''
    val = dct.pop(old)
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    '''simple docstring'''
    # define configuration and initialize HuggingFace model
    config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = "huggingface/label-files"
        filename = "vqa2-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config)
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: "False", 1: "True"}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config)
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config)
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config)
    else:
        raise ValueError("Unknown model type")

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)
    if mlm_model or irtr_model:
        ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
        for k in ignore_keys:
            state_dict.pop(k, None)

    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict)

    # Define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    processor = ViltProcessor(image_processor, tokenizer)

    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_1 = processor(image1, text, return_tensors="pt")
        encoding_2 = processor(image2, text, return_tensors="pt")
        outputs = model(
            input_ids=encoding_1.input_ids,
            pixel_values=encoding_1.pixel_values,
            pixel_values_2=encoding_2.pixel_values,
        )
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image, text, return_tensors="pt")
        outputs = model(**encoding)

    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-1_2.5_0_6_1, -1_2.5_1_2_3, -1_2.5_1_7_4])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)
        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-1_5.9_4_9_5, -1_8.1_4_7_2, -1_0.3_0_4_1])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape
        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8_7_2_1, 2.1_2_9_1])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model and processor to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_lowerCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
_lowerCAmelCase : Union[str, Any] = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
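
# Editor's note: a reload sketch, not part of the original script. The folder name is
# illustrative and assumes a VQA checkpoint was converted above.
def _demo_load_vilt(folder="./vilt-b32-finetuned-vqa"):
    from transformers import ViltForQuestionAnswering, ViltProcessor

    image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
    processor = ViltProcessor.from_pretrained(folder)
    model = ViltForQuestionAnswering.from_pretrained(folder)
    inputs = processor(image, "How many cats are there?", return_tensors="pt")
    outputs = model(**inputs)
    print(model.config.id2label[outputs.logits.argmax(-1).item()])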
| 46 | 0 |
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label='''x''')
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
        self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
        self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite('''output_data/output.jpg''', self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow('''Output-Image''', self.img)
        cv2.imshow('''Input-Image''', self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()


if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
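
# Editor's note: a numpy-only sketch of the same histogram-equalization mapping the
# class computes, not part of the original module.
def _demo_equalize(img):
    hist, _ = np.histogram(img.ravel(), 256, [0, 256])
    cdf = hist.cumsum() / hist.sum()           # cumulative distribution of intensities
    lut = np.round(255 * cdf).astype("uint8")  # lookup table: old intensity -> new
    return lut[img]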
| 32 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    '''simple docstring'''
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    '''simple docstring'''
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    '''simple docstring'''
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    '''simple docstring'''
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    '''simple docstring'''
    temp = []
    select = [0] * len(chart)
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    '''simple docstring'''
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    '''simple docstring'''
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n").split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
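
# Editor's note: a non-interactive sketch, not part of the original module, driving the
# same pipeline for minterms {0, 1, 2, 5} of a 3-variable function.
def _demo_qmc():
    binary = decimal_to_binary(3, [0, 1, 2, 5])
    prime_implicants = check(binary)
    chart = prime_implicant_chart(prime_implicants, binary)
    print(selection(chart, prime_implicants))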
| 46 | 0 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_CITATION = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
"""
_DESCRIPTION = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metrics is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""
_KWARGS_DESCRIPTION = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: \"c\" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric('mauve')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mauve(datasets.Metric):
    '''simple docstring'''

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, homepage='''https://github.com/krishnap25/mauve''', inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''string''', id='''sequence'''),
                    '''references''': datasets.Value('''string''', id='''sequence'''),
                }), codebase_urls=['''https://github.com/krishnap25/mauve'''], reference_urls=[
                '''https://arxiv.org/abs/2102.01454''',
                '''https://github.com/krishnap25/mauve''',
            ], )

    def _compute(self, predictions, references, p_features=None, q_features=None, p_tokens=None, q_tokens=None, num_buckets="auto", pca_max_data=-1, kmeans_explained_var=0.9, kmeans_num_redo=5, kmeans_max_iter=5_00, featurize_model_name="gpt2-large", device_id=-1, max_text_length=10_24, divergence_curve_discretization_size=25, mauve_scaling_factor=5, verbose=True, seed=25, ):
        out = compute_mauve(
            p_text=predictions, q_text=references, p_features=p_features, q_features=q_features, p_tokens=p_tokens, q_tokens=q_tokens, num_buckets=num_buckets, pca_max_data=pca_max_data, kmeans_explained_var=kmeans_explained_var, kmeans_num_redo=kmeans_num_redo, kmeans_max_iter=kmeans_max_iter, featurize_model_name=featurize_model_name, device_id=device_id, max_text_length=max_text_length, divergence_curve_discretization_size=divergence_curve_discretization_size, mauve_scaling_factor=mauve_scaling_factor, verbose=verbose, seed=seed, )
        return out
| 33 |
"""simple docstring"""
from __future__ import annotations
from random import random
class Node:
    """
    Treap's node: holds a value and a randomly drawn heap priority.
    """

    def __init__(self, value: int | None = None):
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self):
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat({f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1)

    def __str__(self):
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right


def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    """Split the treap into two: values smaller than `value` and the rest."""
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right


def merge(left: Node | None, right: Node | None) -> Node | None:
    """Merge two treaps, assuming all values in `left` are smaller than in `right`."""
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root: Node | None, value: int) -> Node | None:
    """Insert `value`: split around it, then merge the new node in between."""
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value: int) -> Node | None:
    """Erase all nodes holding `value`: split them out, merge the remainder."""
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)


def inorder(root: Node | None) -> None:
    """Print the treap's values in sorted order."""
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)


def interact_treap(root: Node | None, args: str) -> Node | None:
    """Apply commands: `+x` inserts x, `-x` erases all nodes with value x."""
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root


def main() -> None:
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print("good bye!")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 46 | 0 |
"""simple docstring"""
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration

REQUIRE_FAIRSEQ = {'comet'}
_has_fairseq = importlib.util.find_spec('fairseq') is not None

UNSUPPORTED_ON_WINDOWS = {'code_eval'}
_on_windows = os.name == 'nt'

REQUIRE_TRANSFORMERS = {'bertscore', 'frugalscore', 'perplexity'}
_has_transformers = importlib.util.find_spec('transformers') is not None
def skip_if_metric_requires_fairseq(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('"test requires Fairseq"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_if_metric_requires_transformers(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('"test requires transformers"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_on_windows_if_not_windows_compatible(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('"test not supported on Windows"')
        else:
            test_case(self, metric_name)

    return wrapper


def get_local_metric_names():
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob('./metrics/*/')]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names())
@for_all_test_methods(
    skip_if_metric_requires_fairseq, skip_if_metric_requires_transformers, skip_on_windows_if_not_windows_compatible
)
@local
class LocalMetricTest(parameterized.TestCase):
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None

    @pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning')
    @pytest.mark.filterwarnings('ignore:load_metric is deprecated:FutureWarning')
    def test_load_metric(self, metric_name):
        doctest.ELLIPSIS_MARKER = '[...]'
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join('metrics', metric_name)).module_path)
        metric = datasets.load.import_main_class(metric_module.__name__, dataset=False)
        # check parameters
        parameters = inspect.signature(metric._compute).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values()))  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(metric_name, metric_module.__name__):
            with self.use_local_metrics():
                try:
                    results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @slow
    def test_load_real_metric(self, metric_name):
        doctest.ELLIPSIS_MARKER = '[...]'
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join('metrics', metric_name)).module_path)
        # run doctest
        with self.use_local_metrics():
            results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @contextmanager
    def patch_intensive_calls(self, metric_name, module_name):
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name):
                yield
        else:
            yield

    @contextmanager
    def use_local_metrics(self):
        def load_local_metric(metric_name, *args, **kwargs):
            return load_metric(os.path.join('metrics', metric_name), *args, **kwargs)

        with patch('datasets.load_metric') as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield

    @classmethod
    def register_intensive_calls_patcher(cls, metric_name):
        def wrapper(patcher):
            patcher = contextmanager(patcher)
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher

        return wrapper
@LocalMetricTest.register_intensive_calls_patcher('bleurt')
def patch_bleurt(module_name):
    import tensorflow.compat.v1 as tf
    from bleurt.score import Predictor

    tf.flags.DEFINE_string('sv', '', '')  # handle pytest cli flags

    class MockedPredictor(Predictor):
        def predict(self, input_dict):
            assert len(input_dict['input_ids']) == 2
            return np.array([1.03, 1.04])

    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch('bleurt.score._create_predictor') as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield


@LocalMetricTest.register_intensive_calls_patcher('bertscore')
def patch_bertscore(module_name):
    import torch

    def bert_cos_score_idf(model, refs, *args, **kwargs):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs))

    # mock get_model which is supposed to do download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch('bert_score.scorer.get_model'), patch(
        'bert_score.scorer.bert_cos_score_idf') as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield


@LocalMetricTest.register_intensive_calls_patcher('comet')
def patch_comet(module_name):
    def load_from_checkpoint(model_path):
        class Model:
            def predict(self, data, *args, **kwargs):
                assert len(data) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores) / len(scores)

        return Model()

    # mock download_model and load_from_checkpoint, which would otherwise download a model
    with patch('comet.download_model') as mock_download_model:
        mock_download_model.return_value = None
        with patch('comet.load_from_checkpoint') as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
            yield
def test_seqeval_raises_when_incorrect_scheme():
    metric = load_metric(os.path.join('metrics', 'seqeval'))
    wrong_scheme = 'ERROR'
    error_message = f'Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}'
    with pytest.raises(ValueError, match=re.escape(error_message)):
        metric.compute(predictions=[], references=[], scheme=wrong_scheme)
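# Editor's note: a hedged sketch (not in the original test file) of how a new
# patcher would be registered. The metric name "my_metric" and the patched
# target "my_metric_package.score" are hypothetical.
@LocalMetricTest.register_intensive_calls_patcher('my_metric')
def patch_my_metric(module_name):
    # Replace the expensive model call with a canned response during doctests.
    with patch('my_metric_package.score') as mock_score:
        mock_score.return_value = [0.5]
        yield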
| 34 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speecht5 import SpeechT5Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece_bpe_char.model''')
@require_sentencepiece
@require_tokenizers
class SpeechT5TokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechT5Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB)

        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

        tokenizer.save_pretrained(self.tmpdirname)
def _lowercase ( self: List[str] ,__lowerCAmelCase: str ):
'''simple docstring'''
_lowerCamelCase : Dict = "this is a test"
_lowerCamelCase : Optional[Any] = "this is a test"
return input_text, output_text
def _lowercase ( self: List[str] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: Any=False ,__lowerCAmelCase: str=20 ,__lowerCAmelCase: List[Any]=5 ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : List[str] = self.get_input_output_texts(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = tokenizer.encode(__lowerCAmelCase ,add_special_tokens=__lowerCAmelCase )
_lowerCamelCase : Tuple = tokenizer.decode(__lowerCAmelCase ,clean_up_tokenization_spaces=__lowerCAmelCase )
return text, ids
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = "<pad>"
_lowerCamelCase : List[str] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCAmelCase ) ,__lowerCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCAmelCase ) ,__lowerCAmelCase )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,"<s>" )
self.assertEqual(vocab_keys[1] ,"<pad>" )
self.assertEqual(vocab_keys[-4] ,"œ" )
self.assertEqual(vocab_keys[-2] ,"<mask>" )
self.assertEqual(vocab_keys[-1] ,"<ctc_blank>" )
self.assertEqual(len(__lowerCAmelCase ) ,81 )
def _lowercase ( self: Dict ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size ,79 )
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = self.get_tokenizers(do_lower_case=__lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_lowerCamelCase : Tuple = tokenizer.vocab_size
_lowerCamelCase : Optional[Any] = len(__lowerCAmelCase )
self.assertNotEqual(__lowerCAmelCase ,0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
_lowerCamelCase : Optional[int] = ["aaaaa bbbbbb", "cccccccccdddddddd"]
_lowerCamelCase : Any = tokenizer.add_tokens(__lowerCAmelCase )
_lowerCamelCase : Tuple = tokenizer.vocab_size
_lowerCamelCase : Union[str, Any] = len(__lowerCAmelCase )
self.assertNotEqual(__lowerCAmelCase ,0 )
self.assertEqual(__lowerCAmelCase ,__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase ,len(__lowerCAmelCase ) )
self.assertEqual(__lowerCAmelCase ,all_size + len(__lowerCAmelCase ) )
_lowerCamelCase : Any = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l" ,add_special_tokens=__lowerCAmelCase )
self.assertGreaterEqual(len(__lowerCAmelCase ) ,4 )
self.assertGreater(tokens[0] ,tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] ,tokenizer.vocab_size - 1 )
_lowerCamelCase : List[Any] = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
_lowerCamelCase : str = tokenizer.add_special_tokens(__lowerCAmelCase )
_lowerCamelCase : int = tokenizer.vocab_size
_lowerCamelCase : str = len(__lowerCAmelCase )
self.assertNotEqual(__lowerCAmelCase ,0 )
self.assertEqual(__lowerCAmelCase ,__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase ,len(__lowerCAmelCase ) )
self.assertEqual(__lowerCAmelCase ,all_size_a + len(__lowerCAmelCase ) )
_lowerCamelCase : Optional[int] = tokenizer.encode(
">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l" ,add_special_tokens=__lowerCAmelCase )
self.assertGreaterEqual(len(__lowerCAmelCase ) ,6 )
self.assertGreater(tokens[0] ,tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] ,tokens[1] )
self.assertGreater(tokens[-3] ,tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] ,tokens[-4] )
self.assertEqual(tokens[0] ,tokenizer.eos_token_id )
self.assertEqual(tokens[-3] ,tokenizer.pad_token_id )
def _lowercase ( self: Any ):
'''simple docstring'''
pass
def _lowercase ( self: Tuple ):
'''simple docstring'''
pass
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : Tuple = self.get_tokenizer()
_lowerCamelCase : Optional[int] = tokenizer.tokenize("This is a test" )
# fmt: off
self.assertListEqual(__lowerCAmelCase ,[SPIECE_UNDERLINE, "T", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "a", SPIECE_UNDERLINE, "t", "e", "s", "t"] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) ,[4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] ,)
_lowerCamelCase : int = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__lowerCAmelCase ,[SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "92000", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] )
_lowerCamelCase : List[str] = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
# fmt: off
self.assertListEqual(__lowerCAmelCase ,[4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
_lowerCamelCase : Any = tokenizer.convert_ids_to_tokens(__lowerCAmelCase )
self.assertListEqual(
__lowerCAmelCase ,[SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "<unk>", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] )
@slow
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = [
"Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
"general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
"Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained "
"models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.",
"BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
"conditioning on both left and right context in all layers.",
"The quick brown fox jumps over the lazy dog.",
]
# fmt: off
_lowerCamelCase : Tuple = {
"input_ids": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCAmelCase ,model_name="microsoft/speecht5_asr" ,revision="c5ef64c71905caeccde0e4462ef3f9077224c524" ,sequences=__lowerCAmelCase ,)
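# Editor's note: a hedged usage sketch, not part of the original test file; it
# reuses the SentencePiece fixture loaded as SAMPLE_VOCAB above.
def _example_speecht5_tokenizer_usage():
    tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB)
    ids = tokenizer.encode("this is a test")
    return tokenizer.decode(ids)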
| 46 | 0 |
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = 'examples/'
REPLACE_PATTERNS = {
    'examples': (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    'init': (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    'setup': (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    'doc': (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    'init': 'src/transformers/__init__.py',
    'setup': 'setup.py',
}
README_FILE = 'README.md'
def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a specific pattern."""
    with open(fname, 'r', encoding='utf-8', newline='\n') as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace('VERSION', version)
    code = re_pattern.sub(replace, code)
    with open(fname, 'w', encoding='utf-8', newline='\n') as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the version in all examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove('research_projects')
        if "legacy" in directories:
            directories.remove('legacy')
        for fname in fnames:
            if fname.endswith('.py'):
                update_version_in_file(os.path.join(folder, fname), version, pattern='examples')


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = '🤗 Transformers currently provides the following architectures'
    _end_prompt = '1. Want to contribute a new model?'
    with open(README_FILE, 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith('1.'):
            lines[index] = lines[index].replace(
                'https://huggingface.co/docs/transformers/main/model_doc',
                'https://huggingface.co/docs/transformers/model_doc', )
        index += 1

    with open(README_FILE, 'w', encoding='utf-8', newline='\n') as f:
        f.writelines(lines)


def get_version():
    """Read the current version in the __init__."""
    with open(REPLACE_FILES['init'], 'r') as f:
        code = f.read()
    default_version = REPLACE_PATTERNS['init'][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!')
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
    else:
        default_version = f"""{default_version.major}.{default_version.minor + 1}.0"""

    # Now let's ask nicely if that's the right one.
    version = input(f"""Which version are you releasing? [{default_version}]""")
    if len(version) == 0:
        version = default_version

    print(f"""Updating version to {version}.""")
    global_version_update(version, patch=patch)
    if not patch:
        print('Cleaning main README, don\'t forget to run `make fix-copies`.')
        clean_main_ref_in_model_list()


def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version
    current_version = get_version()
    dev_version = f"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"""Which version are we developing now? [{dev_version}]""")
    if len(version) == 0:
        version = dev_version

    print(f"""Updating version to {version}.""")
    global_version_update(version)
    print('Cleaning main README, don\'t forget to run `make fix-copies`.')
    clean_main_ref_in_model_list()
if __name__ == "__main__":
a_ :Optional[Any] = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
a_ :int = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
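# Editor's note: a hedged, self-contained demonstration (not in the original
# script) of how the "init" pattern above rewrites a version string.
def _example_version_rewrite() -> str:
    sample = '__version__ = "4.28.0.dev0"\n'
    re_pattern, replace = REPLACE_PATTERNS['init']
    # Returns '__version__ = "4.28.0"\n'
    return re_pattern.sub(replace.replace('VERSION', '4.28.0'), sample)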
| 35 |
"""simple docstring"""
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
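# Editor's note: a hedged, generic sketch (not part of this module) of the
# optional-dependency guard used above; "somelib" and the module names are
# hypothetical placeholders.
#
#     try:
#         if not is_somelib_available():
#             raise OptionalDependencyNotAvailable()
#     except OptionalDependencyNotAvailable:
#         from ..utils.dummy_somelib_objects import *  # placeholder classes that raise on use
#     else:
#         from .scheduling_somelib import SomeLibScheduler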
| 46 | 0 |
import warnings
from diffusers import StableDiffusionImg2ImgPipeline  # noqa F401
warnings.warn(
'''The `image_to_image.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionImg2ImgPipeline` instead.'''
)
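# Editor's note: a hedged sketch of the replacement import recommended by the
# warning above; the checkpoint name is illustrative.
#
#     from diffusers import StableDiffusionImg2ImgPipeline
#
#     pipe = StableDiffusionImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")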
| 36 |
"""simple docstring"""
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class A_ ( _a ):
lowerCAmelCase__ = (DDIMParallelScheduler,)
lowerCAmelCase__ = (('eta', 0.0), ('num_inference_steps', 5_0))
def _lowercase ( self: List[str] ,**__lowerCAmelCase: Tuple ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = {
"num_train_timesteps": 1_000,
"beta_start": 0.00_01,
"beta_end": 0.02,
"beta_schedule": "linear",
"clip_sample": True,
}
config.update(**__lowerCAmelCase )
return config
def _lowercase ( self: int ,**__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : List[Any] = self.scheduler_classes[0]
_lowerCamelCase : str = self.get_scheduler_config(**__lowerCAmelCase )
_lowerCamelCase : Any = scheduler_class(**__lowerCAmelCase )
_lowerCamelCase, _lowerCamelCase : Optional[Any] = 10, 0.0
_lowerCamelCase : List[Any] = self.dummy_model()
_lowerCamelCase : Optional[Any] = self.dummy_sample_deter
scheduler.set_timesteps(__lowerCAmelCase )
for t in scheduler.timesteps:
_lowerCamelCase : Optional[Any] = model(__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : int = scheduler.step(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ).prev_sample
return sample
def _lowercase ( self: List[str] ):
'''simple docstring'''
for timesteps in [100, 500, 1_000]:
self.check_over_configs(num_train_timesteps=__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = self.scheduler_classes[0]
_lowerCamelCase : Dict = self.get_scheduler_config(steps_offset=1 )
_lowerCamelCase : Union[str, Any] = scheduler_class(**__lowerCAmelCase )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps ,torch.LongTensor([801, 601, 401, 201, 1] ) )
def _lowercase ( self: Any ):
'''simple docstring'''
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] ,[0.0_02, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=__lowerCAmelCase ,beta_end=__lowerCAmelCase )
def _lowercase ( self: List[str] ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__lowerCAmelCase )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__lowerCAmelCase )
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=__lowerCAmelCase )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=__lowerCAmelCase )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
self.check_over_configs(thresholding=__lowerCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=__lowerCAmelCase ,prediction_type=__lowerCAmelCase ,sample_max_value=__lowerCAmelCase ,)
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
for t in [1, 10, 49]:
self.check_over_forward(time_step=__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 10, 50] ,[10, 50, 500] ):
self.check_over_forward(time_step=__lowerCAmelCase ,num_inference_steps=__lowerCAmelCase )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
for t, eta in zip([1, 10, 49] ,[0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=__lowerCAmelCase ,eta=__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = self.scheduler_classes[0]
_lowerCamelCase : str = self.get_scheduler_config()
_lowerCamelCase : List[str] = scheduler_class(**__lowerCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ,0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(420 ,400 ) - 0.1_47_71 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(980 ,960 ) - 0.3_24_60 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0 ,0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ,486 ) - 0.0_09_79 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ,998 ) - 0.02 ) ) < 1e-5
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = self.scheduler_classes[0]
_lowerCamelCase : Union[str, Any] = self.get_scheduler_config()
_lowerCamelCase : str = scheduler_class(**__lowerCAmelCase )
_lowerCamelCase, _lowerCamelCase : Optional[int] = 10, 0.0
scheduler.set_timesteps(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = self.dummy_model()
_lowerCamelCase : Optional[int] = self.dummy_sample_deter
_lowerCamelCase : List[str] = self.dummy_sample_deter + 0.1
_lowerCamelCase : Dict = self.dummy_sample_deter - 0.1
_lowerCamelCase : Union[str, Any] = samplea.shape[0]
_lowerCamelCase : List[Any] = torch.stack([samplea, samplea, samplea] ,dim=0 )
_lowerCamelCase : Dict = torch.arange(__lowerCAmelCase )[0:3, None].repeat(1 ,__lowerCAmelCase )
_lowerCamelCase : str = model(samples.flatten(0 ,1 ) ,timesteps.flatten(0 ,1 ) )
_lowerCamelCase : List[str] = scheduler.batch_step_no_noise(__lowerCAmelCase ,timesteps.flatten(0 ,1 ) ,samples.flatten(0 ,1 ) ,__lowerCAmelCase )
_lowerCamelCase : str = torch.sum(torch.abs(__lowerCAmelCase ) )
_lowerCamelCase : List[Any] = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 11_47.79_04 ) < 1e-2
assert abs(result_mean.item() - 0.49_82 ) < 1e-3
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : Any = self.full_loop()
_lowerCamelCase : Optional[Any] = torch.sum(torch.abs(__lowerCAmelCase ) )
_lowerCamelCase : int = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 1_72.00_67 ) < 1e-2
assert abs(result_mean.item() - 0.22_39_67 ) < 1e-3
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : str = self.full_loop(prediction_type="v_prediction" )
_lowerCamelCase : Optional[int] = torch.sum(torch.abs(__lowerCAmelCase ) )
_lowerCamelCase : List[str] = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 52.53_02 ) < 1e-2
assert abs(result_mean.item() - 0.06_84 ) < 1e-3
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : str = self.full_loop(set_alpha_to_one=__lowerCAmelCase ,beta_start=0.01 )
_lowerCamelCase : List[str] = torch.sum(torch.abs(__lowerCAmelCase ) )
_lowerCamelCase : Dict = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 1_49.82_95 ) < 1e-2
assert abs(result_mean.item() - 0.19_51 ) < 1e-3
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : List[str] = self.full_loop(set_alpha_to_one=__lowerCAmelCase ,beta_start=0.01 )
_lowerCamelCase : int = torch.sum(torch.abs(__lowerCAmelCase ) )
_lowerCamelCase : Union[str, Any] = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 1_49.07_84 ) < 1e-2
assert abs(result_mean.item() - 0.19_41 ) < 1e-3
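# Editor's note: a hedged usage sketch (not part of the test file) that
# instantiates the scheduler with the same configuration exercised above.
def _example_ddim_parallel_scheduler():
    scheduler = DDIMParallelScheduler(
        num_train_timesteps=1000,
        beta_start=0.0001,
        beta_end=0.02,
        beta_schedule="linear",
        clip_sample=True,
    )
    scheduler.set_timesteps(10)
    return scheduler.timesteps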
| 46 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : List[str] = logging.get_logger(__name__)
class A__ ( A__ ):
"""simple docstring"""
_lowercase = 'timm_backbone'
def __init__( self : Any , lowerCamelCase__ : str=None , lowerCamelCase__ : Optional[int]=3 , lowerCamelCase__ : Dict=True , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : Any=None , **lowerCamelCase__ : List[str] , ):
super().__init__(**lowerCamelCase__ )
a__ : Any = backbone
a__ : Any = num_channels
a__ : Union[str, Any] = features_only
a__ : List[str] = use_pretrained_backbone
a__ : Optional[Any] = True
a__ : Optional[int] = out_indices if out_indices is not None else (-1,)
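# Editor's note: a hedged sketch, not part of this module. Upstream this class
# is `TimmBackboneConfig`; a typical pairing with `TimmBackbone` looks like:
#
#     from transformers import TimmBackbone, TimmBackboneConfig
#
#     config = TimmBackboneConfig(backbone="resnet50", out_indices=(1, 2, 3, 4))
#     model = TimmBackbone(config)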
| 37 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
_lowerCAmelCase : int = {
'''google/bit-50''': '''https://huggingface.co/google/bit-50/resolve/main/config.json''',
}
class A_ ( _a , _a ):
lowerCAmelCase__ = 'bit'
lowerCAmelCase__ = ['preactivation', 'bottleneck']
lowerCAmelCase__ = ['SAME', 'VALID']
def __init__( self: Tuple ,__lowerCAmelCase: List[Any]=3 ,__lowerCAmelCase: List[str]=64 ,__lowerCAmelCase: Union[str, Any]=[256, 512, 1_024, 2_048] ,__lowerCAmelCase: Optional[int]=[3, 4, 6, 3] ,__lowerCAmelCase: str="preactivation" ,__lowerCAmelCase: Tuple="relu" ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: Dict=32 ,__lowerCAmelCase: List[str]=0.0 ,__lowerCAmelCase: Optional[Any]=False ,__lowerCAmelCase: Dict=32 ,__lowerCAmelCase: Dict=1 ,__lowerCAmelCase: List[Any]=None ,__lowerCAmelCase: str=None ,**__lowerCAmelCase: Any ,):
'''simple docstring'''
super().__init__(**__lowerCAmelCase )
if layer_type not in self.layer_types:
raise ValueError(F"""layer_type={layer_type} is not one of {','.join(self.layer_types )}""" )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
_lowerCamelCase : List[Any] = global_padding.upper()
else:
raise ValueError(F"""Padding strategy {global_padding} not supported""" )
_lowerCamelCase : str = num_channels
_lowerCamelCase : str = embedding_size
_lowerCamelCase : Dict = hidden_sizes
_lowerCamelCase : str = depths
_lowerCamelCase : Any = layer_type
_lowerCamelCase : Any = hidden_act
_lowerCamelCase : List[str] = global_padding
_lowerCamelCase : Tuple = num_groups
_lowerCamelCase : Optional[int] = drop_path_rate
_lowerCamelCase : List[Any] = embedding_dynamic_padding
_lowerCamelCase : Any = output_stride
_lowerCamelCase : List[str] = width_factor
_lowerCamelCase : List[Any] = ["stem"] + [F"""stage{idx}""" for idx in range(1 ,len(__lowerCAmelCase ) + 1 )]
_lowerCamelCase, _lowerCamelCase : Union[str, Any] = get_aligned_output_features_output_indices(
out_features=__lowerCAmelCase ,out_indices=__lowerCAmelCase ,stage_names=self.stage_names )
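# Editor's note: a hedged sketch, not part of this module. Upstream this class
# is `BitConfig`; a minimal instantiation relying on the defaults above:
#
#     from transformers import BitConfig
#
#     config = BitConfig(layer_type="preactivation", global_padding="SAME")
#     print(config.stage_names)  # ["stem", "stage1", ..., "stage4"]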
| 46 | 0 |
'''simple docstring'''
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ):
super().__init__(
features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , keep_in_memory=__SCREAMING_SNAKE_CASE , streaming=__SCREAMING_SNAKE_CASE , num_proc=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
snake_case__ : Tuple = Generator(
cache_dir=__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , gen_kwargs=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
def __UpperCamelCase ( self ):
# Build iterable dataset
if self.streaming:
snake_case__ : Dict = self.builder.as_streaming_dataset(split="""train""" )
# Build regular (map-style) dataset
else:
snake_case__ : Optional[Any] = None
snake_case__ : List[Any] = None
snake_case__ : Dict = None
snake_case__ : int = None
self.builder.download_and_prepare(
download_config=__SCREAMING_SNAKE_CASE , download_mode=__SCREAMING_SNAKE_CASE , verification_mode=__SCREAMING_SNAKE_CASE , base_path=__SCREAMING_SNAKE_CASE , num_proc=self.num_proc , )
snake_case__ : Optional[int] = self.builder.as_dataset(
split="""train""" , verification_mode=__SCREAMING_SNAKE_CASE , in_memory=self.keep_in_memory )
return dataset
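# Editor's note: a hedged sketch, not part of this module. Upstream this reader
# backs `datasets.Dataset.from_generator`; a typical use looks like:
#
#     from datasets import Dataset
#
#     def gen():
#         yield {"text": "hello"}
#         yield {"text": "world"}
#
#     ds = Dataset.from_generator(gen)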
| 38 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : List[str] = {
'''google/vivit-b-16x2-kinetics400''': (
'''https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'''
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class A_ ( _a ):
lowerCAmelCase__ = 'vivit'
def __init__( self: List[Any] ,__lowerCAmelCase: int=224 ,__lowerCAmelCase: Any=32 ,__lowerCAmelCase: str=[2, 16, 16] ,__lowerCAmelCase: Optional[Any]=3 ,__lowerCAmelCase: List[str]=768 ,__lowerCAmelCase: List[Any]=12 ,__lowerCAmelCase: Optional[int]=12 ,__lowerCAmelCase: Optional[Any]=3_072 ,__lowerCAmelCase: Any="gelu_fast" ,__lowerCAmelCase: Tuple=0.0 ,__lowerCAmelCase: Any=0.0 ,__lowerCAmelCase: Union[str, Any]=0.02 ,__lowerCAmelCase: List[str]=1e-06 ,__lowerCAmelCase: Optional[Any]=True ,**__lowerCAmelCase: Optional[int] ,):
'''simple docstring'''
_lowerCamelCase : Any = hidden_size
_lowerCamelCase : List[str] = num_hidden_layers
_lowerCamelCase : Union[str, Any] = num_attention_heads
_lowerCamelCase : Any = intermediate_size
_lowerCamelCase : Tuple = hidden_act
_lowerCamelCase : Tuple = hidden_dropout_prob
_lowerCamelCase : Optional[Any] = attention_probs_dropout_prob
_lowerCamelCase : List[Any] = initializer_range
_lowerCamelCase : int = layer_norm_eps
_lowerCamelCase : Tuple = image_size
_lowerCamelCase : Dict = num_frames
_lowerCamelCase : Optional[int] = tubelet_size
_lowerCamelCase : int = num_channels
_lowerCamelCase : List[str] = qkv_bias
super().__init__(**__lowerCAmelCase )
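# Editor's note: a hedged sketch, not part of this module. Upstream this class
# is `VivitConfig`; a typical use:
#
#     from transformers import VivitConfig, VivitModel
#
#     config = VivitConfig(num_frames=32, tubelet_size=[2, 16, 16])
#     model = VivitModel(config)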
| 46 | 0 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {'''tokenizer_file''': '''tokenizer.json'''}
lowerCAmelCase_ = {
'''tokenizer_file''': {
'''bigscience/tokenizer''': '''https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json''',
'''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json''',
'''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json''',
'''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json''',
'''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json''',
'''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json''',
'''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json''',
},
}
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE : Any = ["input_ids", "attention_mask"]
SCREAMING_SNAKE_CASE : List[str] = None
def __init__( self : Optional[Any] , _UpperCamelCase : Optional[int]=None , _UpperCamelCase : List[Any]=None , _UpperCamelCase : Union[str, Any]=None , _UpperCamelCase : List[str]="<unk>" , _UpperCamelCase : Optional[int]="<s>" , _UpperCamelCase : Dict="</s>" , _UpperCamelCase : Dict="<pad>" , _UpperCamelCase : Optional[Any]=False , _UpperCamelCase : Any=False , **_UpperCamelCase : Tuple , ) ->Any:
super().__init__(
_UpperCamelCase , _UpperCamelCase , tokenizer_file=_UpperCamelCase , unk_token=_UpperCamelCase , bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , pad_token=_UpperCamelCase , add_prefix_space=_UpperCamelCase , clean_up_tokenization_spaces=_UpperCamelCase , **_UpperCamelCase , )
snake_case_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , _UpperCamelCase ) != add_prefix_space:
snake_case_ = getattr(_UpperCamelCase , pre_tok_state.pop('''type''' ) )
snake_case_ = add_prefix_space
snake_case_ = pre_tok_class(**_UpperCamelCase )
snake_case_ = add_prefix_space
def snake_case__( self : Union[str, Any] , *_UpperCamelCase : List[Any] , **_UpperCamelCase : Optional[Any] ) ->BatchEncoding:
snake_case_ = kwargs.get('''is_split_into_words''' , _UpperCamelCase )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'''
''' pretokenized inputs.''' )
return super()._batch_encode_plus(*_UpperCamelCase , **_UpperCamelCase )
def snake_case__( self : Any , *_UpperCamelCase : Dict , **_UpperCamelCase : Optional[Any] ) ->BatchEncoding:
snake_case_ = kwargs.get('''is_split_into_words''' , _UpperCamelCase )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'''
''' pretokenized inputs.''' )
return super()._encode_plus(*_UpperCamelCase , **_UpperCamelCase )
def snake_case__( self : Any , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None ) ->Tuple[str]:
snake_case_ = self._tokenizer.model.save(_UpperCamelCase , name=_UpperCamelCase )
return tuple(_UpperCamelCase )
def snake_case__( self : Union[str, Any] , _UpperCamelCase : "Conversation" ) ->List[int]:
snake_case_ = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase ) + [self.eos_token_id] )
if len(_UpperCamelCase ) > self.model_max_length:
snake_case_ = input_ids[-self.model_max_length :]
return input_ids
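# Editor's note: a hedged usage sketch, not part of this module. Upstream this
# class is `BloomTokenizerFast`; a typical use:
#
#     from transformers import BloomTokenizerFast
#
#     tokenizer = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
#     ids = tokenizer("Hello world")["input_ids"]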
| 39 |
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ ( _a , unittest.TestCase ):
lowerCAmelCase__ = MgpstrTokenizer
lowerCAmelCase__ = False
lowerCAmelCase__ = {}
lowerCAmelCase__ = False
def _lowercase ( self: int ):
'''simple docstring'''
super().setUp()
# fmt: off
_lowerCamelCase : List[Any] = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
# fmt: on
_lowerCamelCase : Optional[Any] = dict(zip(__lowerCAmelCase ,range(len(__lowerCAmelCase ) ) ) )
_lowerCamelCase : List[str] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file ,"w" ,encoding="utf-8" ) as fp:
fp.write(json.dumps(__lowerCAmelCase ) + "\n" )
def _lowercase ( self: List[str] ,**__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname ,**__lowerCAmelCase )
def _lowercase ( self: List[Any] ,__lowerCAmelCase: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : List[Any] = "tester"
_lowerCamelCase : Optional[Any] = "tester"
return input_text, output_text
@unittest.skip("MGP-STR always lower cases letters." )
def _lowercase ( self: Any ):
'''simple docstring'''
pass
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase : List[str] = self.get_tokenizers(do_lower_case=__lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_lowerCamelCase : Tuple = "[SPECIAL_TOKEN]"
tokenizer.add_special_tokens({"cls_token": special_token} )
_lowerCamelCase : Optional[Any] = tokenizer.encode([special_token] ,add_special_tokens=__lowerCAmelCase )
self.assertEqual(len(__lowerCAmelCase ) ,1 )
_lowerCamelCase : int = tokenizer.decode(__lowerCAmelCase ,skip_special_tokens=__lowerCAmelCase )
self.assertTrue(special_token not in decoded )
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase : List[str] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_lowerCamelCase, _lowerCamelCase : List[Any] = self.get_input_output_texts(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = tokenizer.tokenize(__lowerCAmelCase )
_lowerCamelCase : int = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
_lowerCamelCase : List[Any] = tokenizer.encode(__lowerCAmelCase ,add_special_tokens=__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : Dict = tokenizer.convert_ids_to_tokens(__lowerCAmelCase )
self.assertNotEqual(len(__lowerCAmelCase ) ,0 )
_lowerCamelCase : Optional[int] = tokenizer.decode(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase ,__lowerCAmelCase )
self.assertEqual(text_a.replace(" " ,"" ) ,__lowerCAmelCase )
@unittest.skip("MGP-STR tokenizer only handles one sequence." )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" )
def _lowercase ( self: str ):
'''simple docstring'''
pass
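# Editor's note: a hedged usage sketch (not part of the test file); it builds a
# throwaway character vocab like `setUp` above and tokenizes a lowercase word.
def _example_mgpstr_usage(tmpdir):
    vocab = {c: i for i, c in enumerate(["[GO]", "[s]"] + list("0123456789abcdefghijklmnopqrstuvwxyz"))}
    vocab_file = os.path.join(tmpdir, VOCAB_FILES_NAMES["vocab_file"])
    with open(vocab_file, "w", encoding="utf-8") as fp:
        fp.write(json.dumps(vocab) + "\n")
    tokenizer = MgpstrTokenizer(vocab_file)
    return tokenizer.tokenize("tester")  # -> ["t", "e", "s", "t", "e", "r"]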
| 46 | 0 |