| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82–54.1k | int64 0–699 | stringlengths 111–35.6k | int64 0–699 | int64 0–1 |
"""Compute the built-in voltage of a p-n junction diode."""
from math import log

from scipy.constants import Boltzmann, physical_constants

T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(
    donor_conc: float,
    acceptor_conc: float,
    intrinsic_conc: float,
) -> float:
    """Return the built-in voltage (in volts) for the given donor, acceptor
    and intrinsic carrier concentrations (all in the same unit)."""
    if donor_conc <= 0:
        raise ValueError("Donor concentration should be positive")
    elif acceptor_conc <= 0:
        raise ValueError("Acceptor concentration should be positive")
    elif intrinsic_conc <= 0:
        raise ValueError("Intrinsic concentration should be positive")
    elif donor_conc <= intrinsic_conc:
        raise ValueError("Donor concentration should be greater than intrinsic concentration")
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError("Acceptor concentration should be greater than intrinsic concentration")
    else:
        return (
            Boltzmann
            * T
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2)
            / physical_constants["electron volt"][0]
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
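    # Illustrative sanity check (hypothetical values, not from the original file):
    # a silicon-like junction with N_D = N_A = 1e17 and n_i = 1e10 (same unit for
    # all three, e.g. cm^-3) gives roughly 0.83 V at 300 K.
    print(builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1e10))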
"""Tests for the CodeParrot MinHash deduplication utilities."""
from unittest import TestCase

from datasets import Dataset

from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters


def get_dataset() -> Dataset:
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
"""Image/text processor class for Chinese-CLIP."""
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class ChineseCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
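# Hypothetical usage sketch (the model id and dummy image are illustrative
# assumptions, not part of this file):
#
#     from PIL import Image
#     processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#     inputs = processor(text=["一只猫"], images=Image.new("RGB", (224, 224)), return_tensors="pt")
#     # inputs now carries input_ids / attention_mask from the tokenizer and
#     # pixel_values from the image processor in a single BatchEncoding.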
"""Deprecated feature-extractor alias for the Donut image processor."""
import warnings

from ...utils import logging
from .image_processing_donut import DonutImageProcessor

logger = logging.get_logger(__name__)


class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
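# Illustrative check (not in the original file): constructing the deprecated
# class still works but emits a FutureWarning pointing at DonutImageProcessor.
#
#     with warnings.catch_warnings(record=True) as caught:
#         warnings.simplefilter("always")
#         DonutFeatureExtractor()
#         assert any(issubclass(w.category, FutureWarning) for w in caught)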
"""Masked BERT configuration: BertConfig plus movement-pruning parameters."""
import logging

from transformers.configuration_utils import PretrainedConfig

logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    model_type = "masked_bert"

    def __init__(
        self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
        pad_token_id=0, pruning_method="topK", mask_init="constant", mask_scale=0.0, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
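# Minimal usage sketch (illustrative): the pruning-specific fields ride along
# with the usual BERT hyper-parameters.
if __name__ == "__main__":
    config = MaskedBertConfig(pruning_method="topK", mask_scale=0.0)
    print(config.model_type, config.vocab_size, config.pruning_method)  # masked_bert 30522 topK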
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        n_identifier: Union[List[str], None] = None,
        ignore_files: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ) -> None:
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)
    def test_modeling_files(self):
        module_dir = Path("src/transformers")
        file_identifier = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(module_dir, identifier=file_identifier, ignore_files=ignore_files)

    def test_tokenization_files(self):
        module_dir = Path("src/transformers")
        file_identifier = "tokenization"
        self.analyze_directory(module_dir, identifier=file_identifier)

    def test_configuration_files(self):
        module_dir = Path("src/transformers")
        file_identifier = "configuration"
        self.analyze_directory(module_dir, identifier=file_identifier)

    def test_remaining_files(self):
        module_dir = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(module_dir, n_identifier=n_identifiers)

    def test_doc_files(self):
        doc_source_dir = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(doc_source_dir, ignore_files=ignore_files, only_modules=False)
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a_ = logging.get_logger(__name__)
a_ = {
'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
def __init__( self: Tuple , a: str=True , a: Tuple=None , a: str=3 , a: List[str]=3_00 , a: int=10_24 , a: Dict=6 , a: Dict=10_24 , a: Union[str, Any]=8 , a: Optional[Any]=6 , a: Any=10_24 , a: Any=8 , a: Any=0.0 , a: Optional[Any]=True , a: List[Any]="relu" , a: List[Any]=2_56 , a: Dict=0.1 , a: str=0.0 , a: int=0.0 , a: List[str]=0.02 , a: Union[str, Any]=1.0 , a: Optional[Any]=True , a: Tuple=False , a: List[str]="sine" , a: List[Any]="resnet50" , a: Dict=True , a: Union[str, Any]=False , a: Optional[int]=4 , a: Optional[int]=4 , a: Tuple=4 , a: int=False , a: Tuple=3_00 , a: Any=False , a: List[str]=1 , a: Any=5 , a: Any=2 , a: List[str]=1 , a: Union[str, Any]=1 , a: Union[str, Any]=5 , a: str=2 , a: Any=0.1 , a: str=0.25 , a: str=False , **a: Optional[int] , ) ->Tuple:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
a_ = CONFIG_MAPPING["resnet"](out_features=["stage4"])
elif isinstance(a , a):
a_ = backbone_config.get("model_type")
a_ = CONFIG_MAPPING[backbone_model_type]
a_ = config_class.from_dict(a)
a_ = use_timm_backbone
a_ = backbone_config
a_ = num_channels
a_ = num_queries
a_ = max_position_embeddings
a_ = d_model
a_ = encoder_ffn_dim
a_ = encoder_layers
a_ = encoder_attention_heads
a_ = decoder_ffn_dim
a_ = decoder_layers
a_ = decoder_attention_heads
a_ = dropout
a_ = attention_dropout
a_ = activation_dropout
a_ = activation_function
a_ = init_std
a_ = init_xavier_std
a_ = encoder_layerdrop
a_ = auxiliary_loss
a_ = position_embedding_type
a_ = backbone
a_ = use_pretrained_backbone
a_ = dilation
# deformable attributes
a_ = num_feature_levels
a_ = encoder_n_points
a_ = decoder_n_points
a_ = two_stage
a_ = two_stage_num_proposals
a_ = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError("If two_stage is True, with_box_refine must be True.")
# Hungarian matcher
a_ = class_cost
a_ = bbox_cost
a_ = giou_cost
# Loss coefficients
a_ = mask_loss_coefficient
a_ = dice_loss_coefficient
a_ = bbox_loss_coefficient
a_ = giou_loss_coefficient
a_ = eos_coefficient
a_ = focal_alpha
a_ = disable_custom_kernels
super().__init__(is_encoder_decoder=a , **a)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self) -> dict:
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
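# Minimal usage sketch (illustrative; this module only runs inside its
# transformers package context): the attribute_map aliases resolve to the
# underlying fields, and to_dict() also serializes the nested backbone config.
#
#     config = DeformableDetrConfig()
#     assert config.hidden_size == config.d_model
#     assert config.to_dict()["model_type"] == "deformable_detr"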
"""Project Euler problem 6: difference between the square of the sum and the
sum of the squares of the first ``n`` natural numbers."""


def solution(n: int = 100) -> int:
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)


if __name__ == "__main__":
    print(f"{solution() = }")
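    # Cross-check (illustrative): brute force agrees with the closed forms for n = 10.
    n = 10
    brute = sum(range(1, n + 1)) ** 2 - sum(i * i for i in range(1, n + 1))
    print(brute, solution(n))  # 2640 2640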
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
@cached_property
def _lowerCAmelCase ( self: Any) ->Optional[Any]:
'''simple docstring'''
return PegasusTokenizer.from_pretrained("google/pegasus-large")
def _lowerCAmelCase ( self: Union[str, Any] , **a: Optional[int]) ->PegasusTokenizer:
'''simple docstring'''
return PegasusTokenizer.from_pretrained(self.tmpdirname , **a)
def _lowerCAmelCase ( self: str , a: Optional[int]) ->Optional[Any]:
'''simple docstring'''
return ("This is a test", "This is a test")
def _lowerCAmelCase ( self: Tuple) ->Tuple:
'''simple docstring'''
a_ = "</s>"
a_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a) , a)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a) , a)
def _lowerCAmelCase ( self: Union[str, Any]) ->List[str]:
'''simple docstring'''
a_ = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , "<pad>")
self.assertEqual(vocab_keys[1] , "</s>")
self.assertEqual(vocab_keys[-1] , "v")
self.assertEqual(len(a) , 11_03)
def _lowerCAmelCase ( self: Union[str, Any]) ->List[str]:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 11_03)
def _lowerCAmelCase ( self: Tuple) ->Any:
'''simple docstring'''
a_ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
a_ = self.tokenizer_class.from_pretrained(self.tmpdirname)
a_ = (
"Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
" </s> <pad> <pad> <pad>"
)
a_ = rust_tokenizer([raw_input_str] , return_tensors=a , add_special_tokens=a).input_ids[0]
a_ = py_tokenizer([raw_input_str] , return_tensors=a , add_special_tokens=a).input_ids[0]
self.assertListEqual(a , a)
def _lowerCAmelCase ( self: int) ->List[str]:
'''simple docstring'''
a_ = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
a_ = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
a_ = [2, 4_13, 6_15, 1_14, 3, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1]
a_ = tokenizer([raw_input_str] , return_tensors=a).input_ids[0]
self.assertListEqual(a , a)
def _lowerCAmelCase ( self: List[Any]) ->List[Any]:
'''simple docstring'''
a_ = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_61_03
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 1_03
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_05
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 10_24
a_ = "To ensure a smooth flow of bank resolutions."
a_ = [4_13, 6_15, 1_14, 22_91, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1]
a_ = tokenizer([raw_input_str] , return_tensors=a).input_ids[0]
self.assertListEqual(a , a)
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def _lowerCAmelCase ( self: int) ->Union[str, Any]:
'''simple docstring'''
a_ = ["This is going to be way too long." * 1_50, "short example"]
a_ = ["not super long but more than 5 tokens", "tiny"]
a_ = self._large_tokenizer(a , padding=a , truncation=a , return_tensors="pt")
a_ = self._large_tokenizer(
text_target=a , max_length=5 , padding=a , truncation=a , return_tensors="pt")
assert batch.input_ids.shape == (2, 10_24)
assert batch.attention_mask.shape == (2, 10_24)
assert targets["input_ids"].shape == (2, 5)
assert len(a) == 2 # input_ids, attention_mask.
@slow
def _lowerCAmelCase ( self: int) ->Tuple:
'''simple docstring'''
a_ = {"input_ids": [[3_89_79, 1_43, 1_84_85, 6_06, 1_30, 2_66_69, 8_76_86, 1_21, 5_41_89, 11_29, 1_11, 2_66_69, 8_76_86, 1_21, 91_14, 1_47_87, 1_21, 1_32_49, 1_58, 5_92, 9_56, 1_21, 1_46_21, 3_15_76, 1_43, 6_26_13, 1_08, 96_88, 9_30, 4_34_30, 1_15_62, 6_26_13, 3_04, 1_08, 1_14_43, 8_97, 1_08, 93_14, 1_74_15, 6_33_99, 1_08, 1_14_43, 76_14, 1_83_16, 1_18, 42_84, 71_48, 1_24_30, 1_43, 14_00, 2_57_03, 1_58, 1_11, 42_84, 71_48, 1_17_72, 1_43, 2_12_97, 10_64, 1_58, 1_22, 2_04, 35_06, 17_54, 11_33, 1_47_87, 15_81, 1_15, 3_32_24, 44_82, 1_11, 13_55, 1_10, 2_91_73, 3_17, 5_08_33, 1_08, 2_01_47, 9_46_65, 1_11, 7_71_98, 1_07, 1], [1_10, 6_26_13, 1_17, 6_38, 1_12, 11_33, 1_21, 2_00_98, 13_55, 7_90_50, 1_38_72, 1_35, 15_96, 5_35_41, 13_52, 1_41, 1_30_39, 55_42, 1_24, 3_02, 5_18, 1_11, 2_68, 29_56, 1_15, 1_49, 44_27, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_39, 12_35, 27_99, 1_82_89, 1_77_80, 2_04, 1_09, 94_74, 12_96, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a , model_name="google/bigbird-pegasus-large-arxiv" , revision="ba85d0851d708441f91440d509690f1ab6353415" , )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
        tokenizer.save_pretrained(self.tmpdirname)
@cached_property
def _lowerCAmelCase ( self: Tuple) ->Any:
'''simple docstring'''
return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")
def _lowerCAmelCase ( self: List[Any] , **a: Dict) ->PegasusTokenizer:
'''simple docstring'''
return PegasusTokenizer.from_pretrained(self.tmpdirname , **a)
def _lowerCAmelCase ( self: str , a: List[Any]) ->Tuple:
'''simple docstring'''
return ("This is a test", "This is a test")
def _lowerCAmelCase ( self: Optional[Any]) ->Optional[Any]:
'''simple docstring'''
a_ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
a_ = self.tokenizer_class.from_pretrained(self.tmpdirname)
a_ = (
"Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
" <pad> <pad> <pad>"
)
a_ = rust_tokenizer([raw_input_str] , return_tensors=a , add_special_tokens=a).input_ids[0]
a_ = py_tokenizer([raw_input_str] , return_tensors=a , add_special_tokens=a).input_ids[0]
self.assertListEqual(a , a)
@require_torch
def _lowerCAmelCase ( self: Tuple) ->Union[str, Any]:
'''simple docstring'''
a_ = ["This is going to be way too long." * 10_00, "short example"]
a_ = ["not super long but more than 5 tokens", "tiny"]
a_ = self._large_tokenizer(a , padding=a , truncation=a , return_tensors="pt")
a_ = self._large_tokenizer(
text_target=a , max_length=5 , padding=a , truncation=a , return_tensors="pt")
assert batch.input_ids.shape == (2, 40_96)
assert batch.attention_mask.shape == (2, 40_96)
assert targets["input_ids"].shape == (2, 5)
assert len(a) == 2 # input_ids, attention_mask.
def _lowerCAmelCase ( self: Tuple) ->Optional[Any]:
'''simple docstring'''
a_ = (
"This is an example string that is used to test the original TF implementation against the HF"
" implementation"
)
a_ = self._large_tokenizer(a).input_ids
self.assertListEqual(
a , [1_82, 1_17, 1_42, 5_87, 42_11, 1_20, 1_17, 2_63, 1_12, 8_04, 1_09, 8_56, 2_50_16, 31_37, 4_64, 1_09, 2_69_55, 31_37, 1] , )
'''simple docstring'''
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config
def _lowerCAmelCase ( self: Any , a: Tuple=0 , **a: Any) ->Any:
'''simple docstring'''
a_ = dict(self.forward_default_kwargs)
a_ = kwargs.pop("num_inference_steps" , a)
a_ = self.dummy_sample
a_ = 0.1 * sample
a_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
a_ = self.get_scheduler_config(**a)
a_ = scheduler_class(**a)
scheduler.set_timesteps(a)
# copy over dummy past residuals
a_ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a)
a_ = scheduler_class.from_pretrained(a)
new_scheduler.set_timesteps(a)
# copy over dummy past residuals
a_ = dummy_past_residuals[:]
a_ = scheduler.step_prk(a , a , a , **a).prev_sample
a_ = new_scheduler.step_prk(a , a , a , **a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
a_ = scheduler.step_plms(a , a , a , **a).prev_sample
a_ = new_scheduler.step_plms(a , a , a , **a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def _lowerCAmelCase ( self: str) ->Any:
'''simple docstring'''
pass
def _lowerCAmelCase ( self: Union[str, Any] , a: str=0 , **a: Union[str, Any]) ->Tuple:
'''simple docstring'''
a_ = dict(self.forward_default_kwargs)
a_ = kwargs.pop("num_inference_steps" , a)
a_ = self.dummy_sample
a_ = 0.1 * sample
a_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
a_ = self.get_scheduler_config()
a_ = scheduler_class(**a)
scheduler.set_timesteps(a)
# copy over dummy past residuals (must be after setting timesteps)
a_ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a)
a_ = scheduler_class.from_pretrained(a)
# copy over dummy past residuals
new_scheduler.set_timesteps(a)
# copy over dummy past residual (must be after setting timesteps)
a_ = dummy_past_residuals[:]
a_ = scheduler.step_prk(a , a , a , **a).prev_sample
a_ = new_scheduler.step_prk(a , a , a , **a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
a_ = scheduler.step_plms(a , a , a , **a).prev_sample
a_ = new_scheduler.step_plms(a , a , a , **a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def _lowerCAmelCase ( self: Dict , **a: int) ->Any:
'''simple docstring'''
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config(**a)
a_ = scheduler_class(**a)
a_ = 10
a_ = self.dummy_model()
a_ = self.dummy_sample_deter
scheduler.set_timesteps(a)
for i, t in enumerate(scheduler.prk_timesteps):
a_ = model(a , a)
a_ = scheduler.step_prk(a , a , a).prev_sample
for i, t in enumerate(scheduler.plms_timesteps):
a_ = model(a , a)
a_ = scheduler.step_plms(a , a , a).prev_sample
return sample
def _lowerCAmelCase ( self: int) ->int:
'''simple docstring'''
a_ = dict(self.forward_default_kwargs)
a_ = kwargs.pop("num_inference_steps" , a)
for scheduler_class in self.scheduler_classes:
a_ = self.get_scheduler_config()
a_ = scheduler_class(**a)
a_ = self.dummy_sample
a_ = 0.1 * sample
if num_inference_steps is not None and hasattr(a , "set_timesteps"):
scheduler.set_timesteps(a)
elif num_inference_steps is not None and not hasattr(a , "set_timesteps"):
a_ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
a_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
a_ = dummy_past_residuals[:]
a_ = scheduler.step_prk(a , 0 , a , **a).prev_sample
a_ = scheduler.step_prk(a , 1 , a , **a).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
a_ = scheduler.step_plms(a , 0 , a , **a).prev_sample
a_ = scheduler.step_plms(a , 1 , a , **a).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
def _lowerCAmelCase ( self: Dict) ->List[Any]:
'''simple docstring'''
for timesteps in [1_00, 10_00]:
self.check_over_configs(num_train_timesteps=a)
def _lowerCAmelCase ( self: Optional[int]) ->List[Any]:
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=a)
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config(steps_offset=1)
a_ = scheduler_class(**a)
scheduler.set_timesteps(10)
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[9_01, 8_51, 8_51, 8_01, 8_01, 7_51, 7_51, 7_01, 7_01, 6_51, 6_51, 6_01, 6_01, 5_01, 4_01, 3_01, 2_01, 1_01, 1]) , )
def _lowerCAmelCase ( self: Tuple) ->Optional[Any]:
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02]):
self.check_over_configs(beta_start=a , beta_end=a)
def _lowerCAmelCase ( self: int) ->Tuple:
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=a)
def _lowerCAmelCase ( self: Optional[int]) ->List[Any]:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=a)
def _lowerCAmelCase ( self: Tuple) ->Optional[Any]:
'''simple docstring'''
for t in [1, 5, 10]:
self.check_over_forward(time_step=a)
def _lowerCAmelCase ( self: str) ->List[str]:
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00]):
self.check_over_forward(num_inference_steps=a)
def _lowerCAmelCase ( self: Dict) ->Union[str, Any]:
'''simple docstring'''
a_ = 27
for scheduler_class in self.scheduler_classes:
a_ = self.dummy_sample
a_ = 0.1 * sample
a_ = self.get_scheduler_config()
a_ = scheduler_class(**a)
scheduler.set_timesteps(a)
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2]):
a_ = scheduler.step_prk(a , a , a).prev_sample
def _lowerCAmelCase ( self: Optional[Any]) ->Dict:
'''simple docstring'''
with self.assertRaises(a):
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config()
a_ = scheduler_class(**a)
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample).prev_sample
def _lowerCAmelCase ( self: Optional[int]) ->Union[str, Any]:
'''simple docstring'''
a_ = self.full_loop()
a_ = torch.sum(torch.abs(a))
a_ = torch.mean(torch.abs(a))
assert abs(result_sum.item() - 198.1318) < 1e-2
assert abs(result_mean.item() - 0.2580) < 1e-3
def _lowerCAmelCase ( self: Optional[int]) ->int:
'''simple docstring'''
a_ = self.full_loop(prediction_type="v_prediction")
a_ = torch.sum(torch.abs(a))
a_ = torch.mean(torch.abs(a))
assert abs(result_sum.item() - 67.3986) < 1e-2
assert abs(result_mean.item() - 0.0878) < 1e-3
def _lowerCAmelCase ( self: int) ->Optional[Any]:
'''simple docstring'''
a_ = self.full_loop(set_alpha_to_one=a , beta_start=0.01)
a_ = torch.sum(torch.abs(a))
a_ = torch.mean(torch.abs(a))
assert abs(result_sum.item() - 230.0399) < 1e-2
assert abs(result_mean.item() - 0.2995) < 1e-3
def _lowerCAmelCase ( self: List[str]) ->Any:
'''simple docstring'''
a_ = self.full_loop(set_alpha_to_one=a , beta_start=0.01)
a_ = torch.sum(torch.abs(a))
a_ = torch.mean(torch.abs(a))
assert abs(result_sum.item() - 186.9482) < 1e-2
assert abs(result_mean.item() - 0.2434) < 1e-3
'''simple docstring'''
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time(t) -> str:
    "Format `t` (in seconds) to (h):mm:ss"
    t = int(t)
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"
def html_progress_bar(value, total, prefix, label, width=300) -> str:
    "An HTML progress bar with `value` out of `total`."
    return f"""
    <div>
      {prefix}
      <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
      {label}
    </div>
    """
def text_to_html_table(items) -> str:
    "Put the texts in `items` in an HTML table."
    html_code = '<table border="1" class="dataframe">\n'
    html_code += """  <thead>\n    <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += f"      <th>{i}</th>\n"
    html_code += "    </tr>\n  </thead>\n  <tbody>\n"
    for line in items[1:]:
        html_code += "    <tr>\n"
        for elt in line:
            elt = f"{elt:.6f}" if isinstance(elt, float) else str(elt)
            html_code += f"      <td>{elt}</td>\n"
        html_code += "    </tr>\n"
    html_code += "  </tbody>\n</table><p>"
    return html_code
class NotebookProgressBar:
    warmup = 5
    update_every = 0.2
def __init__( self: List[str] , a: int , a: Optional[str] = None , a: bool = True , a: Optional["NotebookTrainingTracker"] = None , a: int = 3_00 , ) ->Union[str, Any]:
'''simple docstring'''
a_ = total
a_ = "" if prefix is None else prefix
a_ = leave
a_ = parent
a_ = width
a_ = None
a_ = None
a_ = None
def _lowerCAmelCase ( self: List[str] , a: int , a: bool = False , a: str = None) ->str:
'''simple docstring'''
a_ = value
if comment is not None:
a_ = comment
if self.last_value is None:
a_ = a_ = time.time()
a_ = a_ = value
a_ = a_ = None
a_ = self.warmup
a_ = 1
self.update_bar(a)
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total):
if self.first_calls > 0:
self.first_calls -= 1
a_ = time.time()
a_ = current_time - self.start_time
# We could have value = self.start_value if the update is called twice with the same start value.
if value > self.start_value:
a_ = self.elapsed_time / (value - self.start_value)
else:
a_ = None
if value >= self.total:
a_ = self.total
a_ = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
a_ = self.average_time_per_item * (self.total - value)
self.update_bar(a)
a_ = value
a_ = current_time
if self.average_time_per_item is None:
a_ = 1
else:
a_ = max(int(self.update_every / self.average_time_per_item) , 1)
def _lowerCAmelCase ( self: Union[str, Any] , a: List[Any] , a: Union[str, Any]=None) ->Optional[Any]:
'''simple docstring'''
a_ = " " * (len(str(self.total)) - len(str(a))) + str(a)
if self.elapsed_time is None:
a_ = f"""[{spaced_value}/{self.total} : < :"""
elif self.predicted_remaining is None:
a_ = f"""[{spaced_value}/{self.total} {format_time(self.elapsed_time)}"""
else:
a_ = (
f"""[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <"""
f""" {format_time(self.predicted_remaining)}"""
)
self.label += f""", {1/self.average_time_per_item:.2f} it/s"""
self.label += "]" if self.comment is None or len(self.comment) == 0 else f""", {self.comment}]"""
self.display()
def _lowerCAmelCase ( self: int) ->Union[str, Any]:
'''simple docstring'''
a_ = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width)
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
a_ = disp.display(disp.HTML(self.html_code) , display_id=a)
else:
self.output.update(disp.HTML(self.html_code))
def _lowerCAmelCase ( self: int) ->List[Any]:
'''simple docstring'''
if self.parent is None and self.output is not None:
self.output.update(disp.HTML(""))
class NotebookTrainingTracker(NotebookProgressBar):
def __init__( self: Optional[Any] , a: Tuple , a: List[str]=None) ->List[str]:
'''simple docstring'''
super().__init__(a)
a_ = None if column_names is None else [column_names]
a_ = None
def _lowerCAmelCase ( self: int) ->List[Any]:
'''simple docstring'''
a_ = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width)
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table)
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
a_ = disp.display(disp.HTML(self.html_code) , display_id=a)
else:
self.output.update(disp.HTML(self.html_code))
def _lowerCAmelCase ( self: Dict , a: Union[str, Any]) ->Dict:
'''simple docstring'''
if self.inner_table is None:
a_ = [list(values.keys()), list(values.values())]
else:
a_ = self.inner_table[0]
if len(self.inner_table) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(a)
a_ = columns
self.inner_table.append([values[c] for c in columns])
def _lowerCAmelCase ( self: Optional[int] , a: Tuple , a: Dict=None , a: Optional[Any]=3_00) ->Optional[int]:
'''simple docstring'''
a_ = NotebookProgressBar(a , prefix=a , parent=self , width=a)
return self.child_bar
def _lowerCAmelCase ( self: Dict) ->List[str]:
'''simple docstring'''
a_ = None
self.display()
class NotebookProgressCallback(TrainerCallback):
def __init__( self: Union[str, Any]) ->List[str]:
'''simple docstring'''
a_ = None
a_ = None
a_ = False
def _lowerCAmelCase ( self: List[str] , a: Union[str, Any] , a: List[str] , a: Dict , **a: Any) ->Tuple:
'''simple docstring'''
a_ = "Epoch" if args.evaluation_strategy == IntervalStrategy.EPOCH else "Step"
a_ = 0
a_ = 0
a_ = [self.first_column] + ["Training Loss"]
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append("Validation Loss")
a_ = NotebookTrainingTracker(state.max_steps , a)
def _lowerCAmelCase ( self: List[str] , a: List[Any] , a: List[str] , a: Any , **a: List[Any]) ->List[str]:
'''simple docstring'''
a_ = int(state.epoch) if int(state.epoch) == state.epoch else f"""{state.epoch:.2f}"""
self.training_tracker.update(
state.global_step + 1 , comment=f"""Epoch {epoch}/{state.num_train_epochs}""" , force_update=self._force_next_update , )
a_ = False
def _lowerCAmelCase ( self: str , a: Optional[Any] , a: Dict , a: int , a: Dict=None , **a: List[str]) ->List[Any]:
'''simple docstring'''
if not has_length(a):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
a_ = self.training_tracker.add_child(len(a))
else:
a_ = NotebookProgressBar(len(a))
self.prediction_bar.update(1)
else:
self.prediction_bar.update(self.prediction_bar.value + 1)
def _lowerCAmelCase ( self: Optional[Any] , a: Tuple , a: Any , a: Union[str, Any] , **a: List[Any]) ->Optional[Any]:
'''simple docstring'''
if self.prediction_bar is not None:
self.prediction_bar.close()
a_ = None
def _lowerCAmelCase ( self: Optional[Any] , a: List[str] , a: Optional[int] , a: Dict , a: Optional[int]=None , **a: List[Any]) ->Dict:
'''simple docstring'''
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
a_ = {"Training Loss": logs["loss"]}
# First column is necessarily Step sine we're not in epoch eval strategy
a_ = state.global_step
self.training_tracker.write_line(a)
def _lowerCAmelCase ( self: List[Any] , a: Any , a: Dict , a: Dict , a: Optional[Any]=None , **a: List[Any]) ->Any:
'''simple docstring'''
if self.training_tracker is not None:
a_ = {"Training Loss": "No log", "Validation Loss": "No log"}
for log in reversed(state.log_history):
if "loss" in log:
a_ = log["loss"]
break
if self.first_column == "Epoch":
a_ = int(state.epoch)
else:
a_ = state.global_step
a_ = "eval"
for k in metrics:
if k.endswith("_loss"):
a_ = re.sub(r"\_loss$" , "" , a)
a_ = metrics.pop("total_flos" , a)
a_ = metrics.pop("epoch" , a)
a_ = metrics.pop(f"""{metric_key_prefix}_runtime""" , a)
a_ = metrics.pop(f"""{metric_key_prefix}_samples_per_second""" , a)
a_ = metrics.pop(f"""{metric_key_prefix}_steps_per_second""" , a)
a_ = metrics.pop(f"""{metric_key_prefix}_jit_compilation_time""" , a)
for k, v in metrics.items():
if k == f"""{metric_key_prefix}_loss""":
a_ = v
else:
a_ = k.split("_")
a_ = " ".join([part.capitalize() for part in splits[1:]])
a_ = v
self.training_tracker.write_line(a)
self.training_tracker.remove_child()
a_ = None
# Evaluation takes a long time so we should force the next update.
a_ = True
def _lowerCAmelCase ( self: Any , a: int , a: str , a: Tuple , **a: List[str]) ->List[Any]:
'''simple docstring'''
self.training_tracker.update(
state.global_step , comment=f"""Epoch {int(state.epoch)}/{state.num_train_epochs}""" , force_update=a)
a_ = None
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
def _lowerCAmelCase ( self: Optional[int]) ->Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
def _lowerCAmelCase ( self: str) ->Optional[int]:
'''simple docstring'''
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params
a_ = "bird"
a_ = jax.device_count()
a_ = pipe.prepare_text_inputs([prompts] * num_samples)
a_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png")
a_ = pipe.prepare_image_inputs([canny_image] * num_samples)
a_ = jax.random.PRNGKey(0)
a_ = jax.random.split(a , jax.device_count())
a_ = replicate(a)
a_ = shard(a)
a_ = shard(a)
a_ = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=50 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
a_ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
a_ = images[0, 2_53:2_56, 2_53:2_56, -1]
a_ = jnp.asarray(jax.device_get(image_slice.flatten()))
a_ = jnp.array(
[0.16_7969, 0.11_6699, 0.08_1543, 0.15_4297, 0.13_2812, 0.10_8887, 0.16_9922, 0.16_9922, 0.20_5078])
print(f"""output_slice: {output_slice}""")
assert jnp.abs(output_slice - expected_slice).max() < 1e-2
def _lowerCAmelCase ( self: Union[str, Any]) ->str:
'''simple docstring'''
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params
a_ = "Chef in the kitchen"
a_ = jax.device_count()
a_ = pipe.prepare_text_inputs([prompts] * num_samples)
a_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png")
a_ = pipe.prepare_image_inputs([pose_image] * num_samples)
a_ = jax.random.PRNGKey(0)
a_ = jax.random.split(a , jax.device_count())
a_ = replicate(a)
a_ = shard(a)
a_ = shard(a)
a_ = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=50 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
a_ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
a_ = images[0, 2_53:2_56, 2_53:2_56, -1]
a_ = jnp.asarray(jax.device_get(image_slice.flatten()))
a_ = jnp.array(
[[0.27_1484, 0.26_1719, 0.27_5391, 0.27_7344, 0.27_9297, 0.29_1016, 0.29_4922, 0.30_2734, 0.30_2734]])
print(f"""output_slice: {output_slice}""")
assert jnp.abs(output_slice - expected_slice).max() < 1e-2
'''simple docstring'''
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path) -> None:
    # Initialise a PyTorch model from the json config
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from the TensorFlow checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save the PyTorch model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--mobilebert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained MobileBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
a_ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
"""Project Euler problem 1: sum of all multiples of 3 or 5 below ``n``."""


def solution(n: int = 1000) -> int:
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)


if __name__ == "__main__":
    print(f"{solution() = }")
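    # Illustrative check: below 10 the multiples of 3 or 5 are 3, 5, 6, 9 -> 23.
    assert solution(10) == 23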
'''simple docstring'''
def simplify(current_set: list[list]) -> list[list]:
'''simple docstring'''
a_ = current_set.copy()
for row_index, row in enumerate(lowercase__ ):
a_ = row[0]
for column_index, column in enumerate(lowercase__ ):
if magnitude == 0:
a_ = column
continue
a_ = column / magnitude
# Subtract to cancel term
a_ = current_set[0]
a_ = [first_row]
a_ = current_set[1::]
for row in current_set:
a_ = []
# If first term is 0, it is already in form we want, so we preserve it
if row[0] == 0:
final_set.append(lowercase__ )
continue
for column_index in range(len(lowercase__ ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(lowercase__ )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
a_ = final_set[0]
a_ = []
a_ = []
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
a_ = simplify(lowercase__ )
for i in range(len(lowercase__ ) ):
resultant[i].insert(0 ,current_first_column[i] )
resultant.insert(0 ,lowercase__ )
a_ = resultant
return final_set
def solve_simultaneous(equations: list) -> list:
'''simple docstring'''
if len(lowercase__ ) == 0:
raise IndexError("solve_simultaneous() requires n lists of length n+1" )
a_ = len(lowercase__ ) + 1
if any(len(lowercase__ ) != _length for item in equations ):
raise IndexError("solve_simultaneous() requires n lists of length n+1" )
for row in equations:
if any(not isinstance(lowercase__ ,(int, float) ) for column in row ):
raise ValueError("solve_simultaneous() requires lists of integers" )
if len(lowercase__ ) == 1:
return [equations[0][-1] / equations[0][0]]
a_ = equations.copy()
if any(0 in row for row in data_set ):
a_ = data_set.copy()
a_ = []
for row_index, row in enumerate(lowercase__ ):
if 0 not in row:
a_ = data_set.pop(lowercase__ )
break
if not full_row:
raise ValueError("solve_simultaneous() requires at least 1 full equation" )
data_set.insert(0 ,lowercase__ )
a_ = data_set.copy()
a_ = simplify(lowercase__ )
a_ = simplified[::-1]
a_ = []
for row in simplified:
a_ = row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
a_ = row.copy()[: len(lowercase__ ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(lowercase__ ) == 0:
solutions.append(0 )
continue
a_ = temp_row[1::]
a_ = temp_row[::-1]
for column_index, column in enumerate(lowercase__ ):
current_solution -= column * solutions[column_index]
solutions.append(lowercase__ )
a_ = []
for item in solutions:
final.append(float(round(lowercase__ ,5 ) ) )
return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
eq = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
"""Project Euler problem 234: sum of all semidivisible numbers not exceeding
the given limit."""
import math


def prime_sieve(n: int) -> list:
    """Return a list of all primes below ``n`` (sieve of Eratosthenes)."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(limit: int = 999_966_663_333) -> int:
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum


if __name__ == "__main__":
    print(solution())
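    # Sanity check of the sieve helper (illustrative, not in the original file):
    assert prime_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]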
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
a_ = logging.get_logger(__name__)
a_ = {'vocab_file': 'vocab.txt', 'emoji_file': 'emoji.json'}
a_ = {
'vocab_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt',
},
'emoji_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json',
},
}
a_ = {
'abeja/gpt-neox-japanese-2.7b': 2_048,
}
def __UpperCAmelCase (lowercase__ ,lowercase__ ) -> Tuple:
'''simple docstring'''
with open(lowercase__ ,"r" ,encoding="utf-8" ) as f:
a_ = json.loads(f.read() )
a_ = collections.OrderedDict()
a_ = collections.OrderedDict()
a_ = collections.OrderedDict()
with open(lowercase__ ,"r" ,encoding="utf-8" ) as f:
a_ = f.readlines()
a_ = [[t.rstrip("\n" )] if (t == "," or "," not in t) else t.rstrip("\n" ).split("," ) for t in token]
for idx, b in enumerate(lowercase__ ):
a_ = b
a_ = idx
for wd in b:
a_ = idx
return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self: List[str] , a: Union[str, Any] , a: Optional[int] , a: List[str]="<|endoftext|>" , a: Union[str, Any]="<|endoftext|>" , a: Dict="<|startoftext|>" , a: Dict="<|endoftext|>" , a: Union[str, Any]=False , **a: Optional[int] , ) ->str:
'''simple docstring'''
super().__init__(
unk_token=a , pad_token=a , bos_token=a , eos_token=a , do_clean_text=a , **a , )
if not os.path.isfile(a):
raise ValueError(
f"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
" model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
if not os.path.isfile(a):
raise ValueError(
f"""Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
" pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
a_ = do_clean_text
a_ , a_ , a_ , a_ = load_vocab_and_emoji(a , a)
a_ = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji)
@property
def _lowerCAmelCase ( self: Optional[Any]) ->Optional[Any]:
'''simple docstring'''
return len(self.raw_vocab)
def _lowerCAmelCase ( self: Dict) ->Any:
'''simple docstring'''
return dict(self.raw_vocab , **self.added_tokens_encoder)
def _lowerCAmelCase ( self: Union[str, Any] , a: Any) ->Dict:
'''simple docstring'''
return self.subword_tokenizer.tokenize(a , clean=self.do_clean_text)
def _lowerCAmelCase ( self: int , a: List[Any]) ->Union[str, Any]:
'''simple docstring'''
return self.vocab.get(a , self.vocab.get(self.unk_token))
def _lowerCAmelCase ( self: Optional[Any] , a: Optional[int]) ->str:
'''simple docstring'''
return self.subword_tokenizer.convert_id_to_token(a)
def _lowerCAmelCase ( self: Optional[int] , a: Any) ->str:
'''simple docstring'''
a_ = "".join(a).strip()
return out_string
def _lowerCAmelCase ( self: Any , a: "Conversation") ->List[int]:
'''simple docstring'''
a_ = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(a , add_special_tokens=a) + [self.eos_token_id])
if len(a) > self.model_max_length:
a_ = input_ids[-self.model_max_length :]
return input_ids
def _lowerCAmelCase ( self: int , a: str , a: Optional[str] = None) ->Tuple[str]:
'''simple docstring'''
a_ = 0
if os.path.isdir(a):
a_ = os.path.join(
a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
a_ = os.path.join(
a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"])
else:
a_ = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
)
a_ = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
)
with open(a , "w" , encoding="utf-8") as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
" Please check that the vocabulary is not corrupted!")
a_ = token_index
writer.write(",".join(a) + "\n")
index += 1
with open(a , "w" , encoding="utf-8") as writer:
json.dump(self.emoji , a)
return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
def __init__( self: List[str] , a: Any , a: Union[str, Any] , a: Any) ->List[Any]:
'''simple docstring'''
a_ = vocab # same as swe
a_ = ids_to_tokens # same as bpe
a_ = emoji
a_ = np.max([len(a) for w in self.vocab.keys()])
a_ = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
a_ = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
a_ = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
a_ = re.compile(
r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
a_ = re.compile(
r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
a_ = re.compile(
r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*")
a_ = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
a_ = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
a_ = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})
def __len__( self: Dict) ->Any:
'''simple docstring'''
return len(self.ids_to_tokens)
def _lowerCAmelCase ( self: Union[str, Any] , a: Tuple) ->Any:
'''simple docstring'''
a_ = self.content_repattera.sub("<URL>" , a)
a_ = self.content_repattera.sub("<EMAIL>" , a)
a_ = self.content_repattera.sub("<TEL>" , a)
a_ = self.content_repattera.sub("<DATE>" , a)
a_ = self.content_repattera.sub("<DATE>" , a)
a_ = self.content_repattera.sub("<PRICE>" , a)
a_ = content.translate(self.content_transa)
while "<BLOCK><BLOCK>" in content:
a_ = content.replace("<BLOCK><BLOCK>" , "<BLOCK>")
return content
def _lowerCAmelCase ( self: Any , a: int , a: Optional[int]=False) ->List[str]:
'''simple docstring'''
a_ = text.replace(" " , "<SP>")
a_ = text.replace(" " , "<SP>")
a_ = text.replace("\r\n" , "<BR>")
a_ = text.replace("\n" , "<BR>")
a_ = text.replace("\r" , "<BR>")
a_ = text.replace("\t" , "<TAB>")
a_ = text.replace("—" , "ー")
a_ = text.replace("−" , "ー")
for k, v in self.emoji["emoji"].items():
if k in text:
a_ = text.replace(a , a)
if clean:
a_ = self.clean_text(a)
def check_simbol(x):
    e = x.encode()
    if len(x) == 1 and len(e) == 2:
        c = (int(e[0]) << 8) + int(e[1])
if (
(c >= 0XC_2_A_1 and c <= 0XC_2_B_F)
or (c >= 0XC_7_8_0 and c <= 0XC_7_8_3)
or (c >= 0XC_A_B_9 and c <= 0XC_B_B_F)
or (c >= 0XC_C_8_0 and c <= 0XC_D_A_2)
):
return True
return False
def checkuae(x):
    e = x.encode()
    if len(x) == 1 and len(e) == 3:
        c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
if c >= 0XE_2_8_0_8_0 and c <= 0XE_2_B_0_7_F:
return True
return False
a_ = 0
a_ = []
while pos < len(a):
a_ = min(len(a) , pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
a_ = [] # (token_id, token, pos)
for e in range(a , a , -1):
a_ = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(a) > 2:
a_ = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e))
if len(a) > 0:
# the smallest token_id is adopted
a_ , a_ , a_ = sorted(a , key=lambda a: x[0])[0]
result.append(a)
a_ = e
else:
a_ = pos + 1
a_ = text[pos:end]
if check_simbol(a):
result.append("<KIGOU>")
elif checkuae(a):
result.append("<U2000U2BFF>")
else:
for i in wd.encode("utf-8"):
result.append("<|byte%d|>" % i)
a_ = end
return result
def _lowerCAmelCase ( self: int , a: List[Any] , a: Any="\n") ->str:
'''simple docstring'''
a_ = []
a_ = []
a_ = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2]))
else:
if len(a) > 0:
words.append(bytearray(a).decode("utf-8" , errors="replace"))
a_ = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["emoji_inv"][word])
elif word == "<SP>":
words.append(" ")
elif word == "<BR>":
words.append(a)
elif word == "<TAB>":
words.append("\t")
elif word == "<BLOCK>":
words.append("▀")
elif word == "<KIGOU>":
words.append("ǀ")
elif word == "<U2000U2BFF>":
words.append("‖")
else:
words.append(a)
if len(a) > 0:
words.append(bytearray(a).decode("utf-8" , errors="replace"))
a_ = "".join(a)
return text
'''simple docstring'''
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
Wav2Vec2FeatureExtractor,
logging,
)
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
a_ = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
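
The three converters above all follow the same pattern: load a checkpoint dict, then copy its tensors into the matching parameters of a freshly built model. A minimal sketch of that transplant pattern (TinyHead and its sizes are invented for illustration; the key names echo the sequence-classification case above):

import torch
import torch.nn as nn


class TinyHead(nn.Module):
    # stand-in for the classification heads the converters above populate
    def __init__(self, hidden: int = 4, num_labels: int = 2):
        super().__init__()
        self.projector = nn.Linear(hidden, hidden)
        self.classifier = nn.Linear(hidden, num_labels)


model = TinyHead()
downstream_dict = {
    "projector.weight": torch.randn(4, 4),
    "projector.bias": torch.randn(4),
    "model.post_net.linear.weight": torch.randn(2, 4),
    "model.post_net.linear.bias": torch.randn(2),
}
with torch.no_grad():  # copy checkpoint tensors into the matching parameters
    model.projector.weight.copy_(downstream_dict["projector.weight"])
    model.projector.bias.copy_(downstream_dict["projector.bias"])
    model.classifier.weight.copy_(downstream_dict["model.post_net.linear.weight"])
    model.classifier.bias.copy_(downstream_dict["model.post_net.linear.bias"])
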
| 685 | 1 |
'''simple docstring'''
from string import ascii_uppercase
a_ = {char: i for i, char in enumerate(ascii_uppercase)}
a_ = dict(enumerate(ascii_uppercase))
def __UpperCAmelCase (lowercase__ ,lowercase__ ) -> str:
'''simple docstring'''
a_ = len(lowercase__ )
a_ = 0
while True:
if x == i:
a_ = 0
if len(lowercase__ ) == len(lowercase__ ):
break
key += key[i]
i += 1
return key
def __UpperCAmelCase (lowercase__ ,lowercase__ ) -> str:
'''simple docstring'''
a_ = ""
a_ = 0
for letter in message:
if letter == " ":
cipher_text += " "
else:
a_ = (dicta[letter] - dicta[key_new[i]]) % 26
i += 1
cipher_text += dicta[x]
return cipher_text
def __UpperCAmelCase (lowercase__ ,lowercase__ ) -> str:
'''simple docstring'''
a_ = ""
a_ = 0
for letter in cipher_text:
if letter == " ":
or_txt += " "
else:
a_ = (dicta[letter] + dicta[key_new[i]] + 26) % 26
i += 1
or_txt += dicta[x]
return or_txt
def __UpperCAmelCase () -> None:
'''simple docstring'''
a_ = "THE GERMAN ATTACK"
a_ = "SECRET"
a_ = generate_key(lowercase__ ,lowercase__ )
a_ = cipher_text(lowercase__ ,lowercase__ )
print(F"""Encrypted Text = {s}""" )
print(F"""Original Text = {original_text(lowercase__ ,lowercase__ )}""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
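
A cleaned-up, runnable sketch of the same scheme: encryption subtracts the key letter mod 26 and decryption adds it back, exactly as above, with the key index advancing only on letters. All names here are new, chosen for readability:

from string import ascii_uppercase

CHAR_TO_INT = {c: i for i, c in enumerate(ascii_uppercase)}
INT_TO_CHAR = dict(enumerate(ascii_uppercase))


def repeat_key(message: str, key: str) -> str:
    # cycle the key until it is at least as long as the message
    return (key * (len(message) // len(key) + 1))[: len(message)]


def encrypt(message: str, key: str) -> str:
    key = repeat_key(message, key)
    out, j = [], 0
    for ch in message:
        if ch == " ":
            out.append(" ")
        else:
            out.append(INT_TO_CHAR[(CHAR_TO_INT[ch] - CHAR_TO_INT[key[j]]) % 26])
            j += 1
    return "".join(out)


def decrypt(cipher: str, key: str) -> str:
    key = repeat_key(cipher, key)
    out, j = [], 0
    for ch in cipher:
        if ch == " ":
            out.append(" ")
        else:
            out.append(INT_TO_CHAR[(CHAR_TO_INT[ch] + CHAR_TO_INT[key[j]]) % 26])
            j += 1
    return "".join(out)


msg, key = "THE GERMAN ATTACK", "SECRET"
assert decrypt(encrypt(msg, key), key) == msg
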
| 685 |
'''simple docstring'''
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
    from .unet_ad import UNetaDModel
    from .unet_ad_condition import UNetaDConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL
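
The package __init__ above gates its re-exports on which backends are installed (the real checks are imported from the package's utils). A minimal sketch of that availability test with importlib; the helper name is mine:

import importlib.util


def is_available(package: str) -> bool:
    # True when the optional backend can be imported in this environment
    return importlib.util.find_spec(package) is not None


if is_available("torch"):
    import torch  # noqa: F401  (torch-backed classes would be re-exported here)

if is_available("flax"):
    import flax  # noqa: F401  (flax-backed classes would be re-exported here)
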
| 685 | 1 |
'''simple docstring'''
from __future__ import annotations
from random import choice
def __UpperCAmelCase (lowercase__ ) -> int:
'''simple docstring'''
return choice(lowercase__ )
def __UpperCAmelCase (lowercase__ ,lowercase__ ) -> int:
'''simple docstring'''
a_ = random_pivot(lowercase__ )
# partition based on pivot
# linear time
a_ = [e for e in lst if e < pivot]
a_ = [e for e in lst if e > pivot]
# if we get lucky, pivot might be the element we want.
# we can easily see this:
    # small (elements smaller than the pivot)
    # + pivot (the kth element, if we are lucky)
    # + big (elements larger than the pivot)
if len(lowercase__ ) == k - 1:
return pivot
# pivot is in elements bigger than k
elif len(lowercase__ ) < k - 1:
return kth_number(lowercase__ ,k - len(lowercase__ ) - 1 )
# pivot is in elements smaller than k
else:
return kth_number(lowercase__ ,lowercase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
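
Note that the partition above keeps only a single copy of the pivot, so it assumes the input values are distinct. A sketch with an explicit equal bucket that also handles duplicates (all names are mine):

from random import choice


def kth_smallest(values: list, k: int) -> int:
    # k is 1-based: k == 1 returns the minimum
    pivot = choice(values)
    smaller = [v for v in values if v < pivot]
    equal = [v for v in values if v == pivot]
    if k <= len(smaller):
        return kth_smallest(smaller, k)
    if k <= len(smaller) + len(equal):
        return pivot
    return kth_smallest([v for v in values if v > pivot], k - len(smaller) - len(equal))


data = [7, 2, 9, 2, 5]
assert [kth_smallest(data, k) for k in range(1, 6)] == sorted(data)
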
| 685 |
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
a_ = logging.get_logger(__name__)
a_ = {'vocab_file': 'vocab.txt', 'emoji_file': 'emoji.json'}
a_ = {
'vocab_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt',
},
'emoji_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json',
},
}
a_ = {
'abeja/gpt-neox-japanese-2.7b': 2_048,
}
def __UpperCAmelCase (lowercase__ ,lowercase__ ) -> Tuple:
'''simple docstring'''
with open(lowercase__ ,"r" ,encoding="utf-8" ) as f:
a_ = json.loads(f.read() )
a_ = collections.OrderedDict()
a_ = collections.OrderedDict()
a_ = collections.OrderedDict()
with open(lowercase__ ,"r" ,encoding="utf-8" ) as f:
a_ = f.readlines()
a_ = [[t.rstrip("\n" )] if (t == "," or "," not in t) else t.rstrip("\n" ).split("," ) for t in token]
for idx, b in enumerate(lowercase__ ):
a_ = b
a_ = idx
for wd in b:
a_ = idx
return vocab, raw_vocab, ids_to_tokens, emoji
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_UpperCAmelCase =VOCAB_FILES_NAMES
_UpperCAmelCase =PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase =['''input_ids''', '''attention_mask''']
def __init__( self: List[str] , a: Union[str, Any] , a: Optional[int] , a: List[str]="<|endoftext|>" , a: Union[str, Any]="<|endoftext|>" , a: Dict="<|startoftext|>" , a: Dict="<|endoftext|>" , a: Union[str, Any]=False , **a: Optional[int] , ) ->str:
'''simple docstring'''
super().__init__(
unk_token=a , pad_token=a , bos_token=a , eos_token=a , do_clean_text=a , **a , )
if not os.path.isfile(a):
raise ValueError(
f"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
" model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
if not os.path.isfile(a):
raise ValueError(
f"""Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
" pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
a_ = do_clean_text
a_ , a_ , a_ , a_ = load_vocab_and_emoji(a , a)
a_ = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji)
@property
def _lowerCAmelCase ( self: Optional[Any]) ->Optional[Any]:
'''simple docstring'''
return len(self.raw_vocab)
def _lowerCAmelCase ( self: Dict) ->Any:
'''simple docstring'''
return dict(self.raw_vocab , **self.added_tokens_encoder)
def _lowerCAmelCase ( self: Union[str, Any] , a: Any) ->Dict:
'''simple docstring'''
return self.subword_tokenizer.tokenize(a , clean=self.do_clean_text)
def _lowerCAmelCase ( self: int , a: List[Any]) ->Union[str, Any]:
'''simple docstring'''
return self.vocab.get(a , self.vocab.get(self.unk_token))
def _lowerCAmelCase ( self: Optional[Any] , a: Optional[int]) ->str:
'''simple docstring'''
return self.subword_tokenizer.convert_id_to_token(a)
def _lowerCAmelCase ( self: Optional[int] , a: Any) ->str:
'''simple docstring'''
a_ = "".join(a).strip()
return out_string
def _lowerCAmelCase ( self: Any , a: "Conversation") ->List[int]:
'''simple docstring'''
a_ = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(a , add_special_tokens=a) + [self.eos_token_id])
if len(a) > self.model_max_length:
a_ = input_ids[-self.model_max_length :]
return input_ids
def _lowerCAmelCase ( self: int , a: str , a: Optional[str] = None) ->Tuple[str]:
'''simple docstring'''
a_ = 0
if os.path.isdir(a):
a_ = os.path.join(
a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
a_ = os.path.join(
a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"])
else:
a_ = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
)
a_ = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
)
with open(a , "w" , encoding="utf-8") as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
" Please check that the vocabulary is not corrupted!")
a_ = token_index
writer.write(",".join(a) + "\n")
index += 1
with open(a , "w" , encoding="utf-8") as writer:
json.dump(self.emoji , a)
return vocab_file, emoji_file
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
def __init__( self: List[str] , a: Any , a: Union[str, Any] , a: Any) ->List[Any]:
'''simple docstring'''
a_ = vocab # same as swe
a_ = ids_to_tokens # same as bpe
a_ = emoji
a_ = np.max([len(a) for w in self.vocab.keys()])
a_ = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
a_ = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
a_ = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
a_ = re.compile(
r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
a_ = re.compile(
r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
a_ = re.compile(
r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*")
a_ = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
a_ = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
a_ = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})
def __len__( self: Dict) ->Any:
'''simple docstring'''
return len(self.ids_to_tokens)
def _lowerCAmelCase ( self: Union[str, Any] , a: Tuple) ->Any:
'''simple docstring'''
a_ = self.content_repattera.sub("<URL>" , a)
a_ = self.content_repattera.sub("<EMAIL>" , a)
a_ = self.content_repattera.sub("<TEL>" , a)
a_ = self.content_repattera.sub("<DATE>" , a)
a_ = self.content_repattera.sub("<DATE>" , a)
a_ = self.content_repattera.sub("<PRICE>" , a)
a_ = content.translate(self.content_transa)
while "<BLOCK><BLOCK>" in content:
a_ = content.replace("<BLOCK><BLOCK>" , "<BLOCK>")
return content
def _lowerCAmelCase ( self: Any , a: int , a: Optional[int]=False) ->List[str]:
'''simple docstring'''
a_ = text.replace(" " , "<SP>")
a_ = text.replace(" " , "<SP>")
a_ = text.replace("\r\n" , "<BR>")
a_ = text.replace("\n" , "<BR>")
a_ = text.replace("\r" , "<BR>")
a_ = text.replace("\t" , "<TAB>")
a_ = text.replace("—" , "ー")
a_ = text.replace("−" , "ー")
for k, v in self.emoji["emoji"].items():
if k in text:
a_ = text.replace(a , a)
if clean:
a_ = self.clean_text(a)
def check_simbol(a: Dict):
a_ = x.encode()
if len(a) == 1 and len(a) == 2:
a_ = (int(e[0]) << 8) + int(e[1])
if (
(c >= 0XC_2_A_1 and c <= 0XC_2_B_F)
or (c >= 0XC_7_8_0 and c <= 0XC_7_8_3)
or (c >= 0XC_A_B_9 and c <= 0XC_B_B_F)
or (c >= 0XC_C_8_0 and c <= 0XC_D_A_2)
):
return True
return False
def checkuae(a: str):
a_ = x.encode()
if len(a) == 1 and len(a) == 3:
a_ = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
if c >= 0XE_2_8_0_8_0 and c <= 0XE_2_B_0_7_F:
return True
return False
a_ = 0
a_ = []
while pos < len(a):
a_ = min(len(a) , pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
a_ = [] # (token_id, token, pos)
for e in range(a , a , -1):
a_ = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(a) > 2:
a_ = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e))
if len(a) > 0:
# the smallest token_id is adopted
a_ , a_ , a_ = sorted(a , key=lambda a: x[0])[0]
result.append(a)
a_ = e
else:
a_ = pos + 1
a_ = text[pos:end]
if check_simbol(a):
result.append("<KIGOU>")
elif checkuae(a):
result.append("<U2000U2BFF>")
else:
for i in wd.encode("utf-8"):
result.append("<|byte%d|>" % i)
a_ = end
return result
def _lowerCAmelCase ( self: int , a: List[Any] , a: Any="\n") ->str:
'''simple docstring'''
a_ = []
a_ = []
a_ = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2]))
else:
if len(a) > 0:
words.append(bytearray(a).decode("utf-8" , errors="replace"))
a_ = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["emoji_inv"][word])
elif word == "<SP>":
words.append(" ")
elif word == "<BR>":
words.append(a)
elif word == "<TAB>":
words.append("\t")
elif word == "<BLOCK>":
words.append("▀")
elif word == "<KIGOU>":
words.append("ǀ")
elif word == "<U2000U2BFF>":
words.append("‖")
else:
words.append(a)
if len(a) > 0:
words.append(bytearray(a).decode("utf-8" , errors="replace"))
a_ = "".join(a)
return text
| 685 | 1 |
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
a_ = 3
def __UpperCAmelCase (lowercase__ ) -> int:
'''simple docstring'''
print("Generating primitive root of p" )
while True:
a_ = random.randrange(3 ,lowercase__ )
if pow(lowercase__ ,2 ,lowercase__ ) == 1:
continue
if pow(lowercase__ ,lowercase__ ,lowercase__ ) == 1:
continue
return g
def __UpperCAmelCase (lowercase__ ) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
'''simple docstring'''
print("Generating prime p..." )
a_ = rabin_miller.generate_large_prime(lowercase__ ) # select large prime number.
a_ = primitive_root(lowercase__ ) # one primitive root on modulo p.
a_ = random.randrange(3 ,lowercase__ ) # private_key -> have to be greater than 2 for safety.
a_ = cryptomath.find_mod_inverse(pow(lowercase__ ,lowercase__ ,lowercase__ ) ,lowercase__ )
a_ = (key_size, e_a, e_a, p)
a_ = (key_size, d)
return public_key, private_key
def __UpperCAmelCase (lowercase__ ,lowercase__ ) -> None:
'''simple docstring'''
if os.path.exists(F"""{name}_pubkey.txt""" ) or os.path.exists(F"""{name}_privkey.txt""" ):
print("\nWARNING:" )
print(
F"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
"Use a different name or delete these files and re-run this program." )
sys.exit()
a_ , a_ = generate_key(lowercase__ )
print(F"""\nWriting public key to file {name}_pubkey.txt...""" )
with open(F"""{name}_pubkey.txt""" ,"w" ) as fo:
fo.write(F"""{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}""" )
print(F"""Writing private key to file {name}_privkey.txt...""" )
with open(F"""{name}_privkey.txt""" ,"w" ) as fo:
fo.write(F"""{private_key[0]},{private_key[1]}""" )
def __UpperCAmelCase () -> None:
'''simple docstring'''
print("Making key files..." )
make_key_files("elgamal" ,2048 )
print("Key files generation successful" )
if __name__ == "__main__":
main()
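
The round trip behind the key files above, as a minimal in-memory sketch. The function names are mine and the parameters are tiny textbook values for illustration only; the snippet above additionally insists on a primitive root and writes keys to disk, but the encrypt/decrypt identity itself only needs consistent modular arithmetic:

import random


def elgamal_keygen(p: int, g: int):
    x = random.randrange(2, p - 1)  # private key
    return x, pow(g, x, p)          # (private x, public h = g^x mod p)


def elgamal_encrypt(m: int, p: int, g: int, h: int):
    k = random.randrange(2, p - 1)  # fresh ephemeral key per message
    return pow(g, k, p), (m * pow(h, k, p)) % p


def elgamal_decrypt(c1: int, c2: int, x: int, p: int) -> int:
    s = pow(c1, x, p)                # shared secret g^(k*x) mod p
    return (c2 * pow(s, -1, p)) % p  # modular inverse via pow (Python 3.8+)


p, g = 467, 2  # tiny demo parameters, never usable in practice
x, h = elgamal_keygen(p, g)
c1, c2 = elgamal_encrypt(123, p, g, h)
assert elgamal_decrypt(c1, c2, x, p) == 123
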
| 685 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
def __init__( self: List[Any] , a: Optional[Any] , a: Dict=13 , a: List[str]=7 , a: Optional[Any]=True , a: int=True , a: Any=True , a: Optional[int]=True , a: int=True , a: Dict=False , a: Union[str, Any]=False , a: Dict=False , a: List[str]=2 , a: Union[str, Any]=99 , a: List[Any]=0 , a: Optional[int]=32 , a: List[str]=5 , a: int=4 , a: List[Any]=0.1 , a: Optional[int]=0.1 , a: Optional[int]=5_12 , a: str=12 , a: Dict=2 , a: Any=0.02 , a: Optional[int]=3 , a: str=4 , a: Optional[int]="last" , a: Tuple=None , a: Any=None , ) ->int:
'''simple docstring'''
a_ = parent
a_ = batch_size
a_ = seq_length
a_ = is_training
a_ = use_input_lengths
a_ = use_token_type_ids
a_ = use_labels
a_ = gelu_activation
a_ = sinusoidal_embeddings
a_ = causal
a_ = asm
a_ = n_langs
a_ = vocab_size
a_ = n_special
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = type_vocab_size
a_ = type_sequence_label_size
a_ = initializer_range
a_ = num_labels
a_ = num_choices
a_ = summary_type
a_ = use_proj
a_ = scope
def _lowerCAmelCase ( self: Tuple) ->Dict:
'''simple docstring'''
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ = random_attention_mask([self.batch_size, self.seq_length])
a_ = None
if self.use_input_lengths:
a_ = (
ids_tensor([self.batch_size] , vocab_size=2) + self.seq_length - 2
) # small variation of seq_length
a_ = None
if self.use_token_type_ids:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs)
a_ = None
a_ = None
a_ = None
if self.use_labels:
a_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a_ = ids_tensor([self.batch_size] , 2).float()
a_ = ids_tensor([self.batch_size] , self.num_choices)
a_ = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _lowerCAmelCase ( self: List[Any]) ->Any:
'''simple docstring'''
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def _lowerCAmelCase ( self: Optional[int] , a: Tuple , a: List[Any] , a: List[Any] , a: Optional[int] , a: int , a: str , a: Any , a: str , a: List[Any] , ) ->Union[str, Any]:
'''simple docstring'''
a_ = FlaubertModel(config=a)
model.to(a)
model.eval()
a_ = model(a , lengths=a , langs=a)
a_ = model(a , langs=a)
a_ = model(a)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _lowerCAmelCase ( self: Optional[int] , a: Optional[Any] , a: Dict , a: Union[str, Any] , a: Dict , a: Optional[Any] , a: Any , a: Tuple , a: str , a: List[str] , ) ->Dict:
'''simple docstring'''
a_ = FlaubertWithLMHeadModel(a)
model.to(a)
model.eval()
a_ = model(a , token_type_ids=a , labels=a)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _lowerCAmelCase ( self: Optional[int] , a: Tuple , a: Optional[Any] , a: List[Any] , a: List[str] , a: List[str] , a: List[str] , a: Optional[Any] , a: str , a: Union[str, Any] , ) ->str:
'''simple docstring'''
a_ = FlaubertForQuestionAnsweringSimple(a)
model.to(a)
model.eval()
a_ = model(a)
a_ = model(a , start_positions=a , end_positions=a)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def _lowerCAmelCase ( self: Union[str, Any] , a: List[str] , a: Tuple , a: Optional[Any] , a: Any , a: Dict , a: Any , a: Optional[int] , a: Optional[Any] , a: Union[str, Any] , ) ->int:
'''simple docstring'''
a_ = FlaubertForQuestionAnswering(a)
model.to(a)
model.eval()
a_ = model(a)
a_ = model(
a , start_positions=a , end_positions=a , cls_index=a , is_impossible=a , p_mask=a , )
a_ = model(
a , start_positions=a , end_positions=a , cls_index=a , is_impossible=a , )
((a_) , ) = result_with_labels.to_tuple()
a_ = model(a , start_positions=a , end_positions=a)
((a_) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , ())
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,))
def _lowerCAmelCase ( self: Union[str, Any] , a: List[str] , a: Tuple , a: Union[str, Any] , a: Any , a: Tuple , a: Union[str, Any] , a: int , a: int , a: Dict , ) ->Union[str, Any]:
'''simple docstring'''
a_ = FlaubertForSequenceClassification(a)
model.to(a)
model.eval()
a_ = model(a)
a_ = model(a , labels=a)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def _lowerCAmelCase ( self: str , a: List[str] , a: Dict , a: Tuple , a: Optional[Any] , a: Any , a: Any , a: str , a: str , a: Optional[Any] , ) ->List[Any]:
'''simple docstring'''
a_ = self.num_labels
a_ = FlaubertForTokenClassification(a)
model.to(a)
model.eval()
a_ = model(a , attention_mask=a , labels=a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def _lowerCAmelCase ( self: Dict , a: Tuple , a: List[Any] , a: Dict , a: Optional[Any] , a: Optional[Any] , a: Optional[Any] , a: Union[str, Any] , a: List[str] , a: Tuple , ) ->Dict:
'''simple docstring'''
a_ = self.num_choices
a_ = FlaubertForMultipleChoice(config=a)
model.to(a)
model.eval()
a_ = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ = model(
a , attention_mask=a , token_type_ids=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def _lowerCAmelCase ( self: Any) ->List[Any]:
'''simple docstring'''
a_ = self.prepare_config_and_inputs()
        (
            a_,
            a_,
            a_,
            a_,
            a_,
            a_,
            a_,
            a_,
            a_,
        ) = config_and_inputs
a_ = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"lengths": input_lengths,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( lowercase_ , lowercase_ , unittest.TestCase ):
_UpperCAmelCase =(
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
_UpperCAmelCase =(
{
'''feature-extraction''': FlaubertModel,
'''fill-mask''': FlaubertWithLMHeadModel,
'''question-answering''': FlaubertForQuestionAnsweringSimple,
'''text-classification''': FlaubertForSequenceClassification,
'''token-classification''': FlaubertForTokenClassification,
'''zero-shot''': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def _lowerCAmelCase ( self: Optional[Any] , a: List[Any] , a: Any , a: List[str] , a: Union[str, Any] , a: int) ->int:
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast")
):
            # `QAPipelineTests` fails for a few models when the slower tokenizers are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _lowerCAmelCase ( self: str , a: Optional[Any] , a: List[Any] , a: Tuple=False) ->List[Any]:
'''simple docstring'''
a_ = super()._prepare_for_class(a , a , return_labels=a)
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
a_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a)
a_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a)
return inputs_dict
def _lowerCAmelCase ( self: Dict) ->Union[str, Any]:
'''simple docstring'''
a_ = FlaubertModelTester(self)
a_ = ConfigTester(self , config_class=a , emb_dim=37)
def _lowerCAmelCase ( self: List[str]) ->Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self: List[str]) ->Optional[Any]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*a)
def _lowerCAmelCase ( self: int) ->Optional[int]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*a)
def _lowerCAmelCase ( self: Optional[int]) ->Optional[Any]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*a)
def _lowerCAmelCase ( self: Any) ->Optional[int]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*a)
def _lowerCAmelCase ( self: Optional[Any]) ->Tuple:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*a)
def _lowerCAmelCase ( self: Optional[Any]) ->Union[str, Any]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*a)
def _lowerCAmelCase ( self: List[Any]) ->Dict:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*a)
@slow
def _lowerCAmelCase ( self: Any) ->Any:
'''simple docstring'''
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = FlaubertModel.from_pretrained(a)
self.assertIsNotNone(a)
@slow
@require_torch_gpu
def _lowerCAmelCase ( self: int) ->Optional[int]:
'''simple docstring'''
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
a_ = True
a_ = model_class(config=a)
a_ = self._prepare_for_class(a , a)
a_ = torch.jit.trace(
a , (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu")))
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a , os.path.join(a , "traced_model.pt"))
a_ = torch.jit.load(os.path.join(a , "traced_model.pt") , map_location=a)
loaded(inputs_dict["input_ids"].to(a) , inputs_dict["attention_mask"].to(a))
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
def _lowerCAmelCase ( self: List[Any]) ->Optional[int]:
'''simple docstring'''
a_ = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
a_ = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]])
with torch.no_grad():
a_ = model(a)[0]
a_ = torch.Size((1, 11, 7_68))
self.assertEqual(output.shape , a)
a_ = torch.tensor(
[[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , a , atol=1e-4))
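
The tester above builds every dummy input with helpers like `ids_tensor` and `random_attention_mask`. A minimal sketch of what such a helper does (this reimplementation is mine, not the one imported from the test utilities):

import torch


def ids_tensor(shape, vocab_size):
    # random token ids in [0, vocab_size), as the tester uses for dummy inputs
    return torch.randint(low=0, high=vocab_size, size=tuple(shape), dtype=torch.long)


batch = ids_tensor([2, 7], vocab_size=99)
assert batch.shape == (2, 7) and int(batch.max()) < 99
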
| 685 | 1 |
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = '▁'
a_ = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
'tokenizer_config_file': 'tokenizer_config.json',
}
a_ = {
'vocab_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json',
},
'spm_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_config_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json',
},
}
a_ = {
'facebook/m2m100_418M': 1_024,
}
# fmt: off
a_ = {
'm2m100': ['af', 'am', 'ar', 'ast', 'az', 'ba', 'be', 'bg', 'bn', 'br', 'bs', 'ca', 'ceb', 'cs', 'cy', 'da', 'de', 'el', 'en', 'es', 'et', 'fa', 'ff', 'fi', 'fr', 'fy', 'ga', 'gd', 'gl', 'gu', 'ha', 'he', 'hi', 'hr', 'ht', 'hu', 'hy', 'id', 'ig', 'ilo', 'is', 'it', 'ja', 'jv', 'ka', 'kk', 'km', 'kn', 'ko', 'lb', 'lg', 'ln', 'lo', 'lt', 'lv', 'mg', 'mk', 'ml', 'mn', 'mr', 'ms', 'my', 'ne', 'nl', 'no', 'ns', 'oc', 'or', 'pa', 'pl', 'ps', 'pt', 'ro', 'ru', 'sd', 'si', 'sk', 'sl', 'so', 'sq', 'sr', 'ss', 'su', 'sv', 'sw', 'ta', 'th', 'tl', 'tn', 'tr', 'uk', 'ur', 'uz', 'vi', 'wo', 'xh', 'yi', 'yo', 'zh', 'zu'],
'wmt21': ['en', 'ha', 'is', 'ja', 'cs', 'ru', 'zh', 'de']
}
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_UpperCAmelCase =VOCAB_FILES_NAMES
_UpperCAmelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase =PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase =['''input_ids''', '''attention_mask''']
_UpperCAmelCase =[]
_UpperCAmelCase =[]
def __init__( self: List[str] , a: List[str] , a: str , a: Tuple=None , a: int=None , a: Optional[int]="<s>" , a: Optional[int]="</s>" , a: Tuple="</s>" , a: Optional[int]="<pad>" , a: Optional[int]="<unk>" , a: Tuple="m2m100" , a: Optional[Dict[str, Any]] = None , a: Dict=8 , **a: List[str] , ) ->None:
'''simple docstring'''
a_ = {} if sp_model_kwargs is None else sp_model_kwargs
a_ = language_codes
a_ = FAIRSEQ_LANGUAGE_CODES[language_codes]
a_ = {lang_code: f"""__{lang_code}__""" for lang_code in fairseq_language_code}
a_ = kwargs.get("additional_special_tokens" , [])
kwargs["additional_special_tokens"] += [
self.get_lang_token(a)
for lang_code in fairseq_language_code
if self.get_lang_token(a) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=a , tgt_lang=a , bos_token=a , eos_token=a , sep_token=a , unk_token=a , pad_token=a , language_codes=a , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=a , **a , )
a_ = vocab_file
a_ = load_json(a)
a_ = {v: k for k, v in self.encoder.items()}
a_ = spm_file
a_ = load_spm(a , self.sp_model_kwargs)
a_ = len(self.encoder)
a_ = {
self.get_lang_token(a): self.encoder_size + i for i, lang_code in enumerate(a)
}
a_ = {lang_code: self.encoder_size + i for i, lang_code in enumerate(a)}
a_ = {v: k for k, v in self.lang_token_to_id.items()}
a_ = src_lang if src_lang is not None else "en"
a_ = tgt_lang
a_ = self.get_lang_id(self._src_lang)
self.set_src_lang_special_tokens(self._src_lang)
a_ = num_madeup_words
@property
def _lowerCAmelCase ( self: Optional[int]) ->int:
'''simple docstring'''
return len(self.encoder) + len(self.lang_token_to_id)
@property
def _lowerCAmelCase ( self: List[str]) ->str:
'''simple docstring'''
return self._src_lang
@src_lang.setter
def _lowerCAmelCase ( self: Optional[int] , a: str) ->None:
'''simple docstring'''
a_ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
def _lowerCAmelCase ( self: Optional[Any] , a: str) ->List[str]:
'''simple docstring'''
return self.sp_model.encode(a , out_type=a)
def _lowerCAmelCase ( self: Any , a: Tuple) ->Union[str, Any]:
'''simple docstring'''
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(a , self.encoder[self.unk_token])
def _lowerCAmelCase ( self: Tuple , a: int) ->str:
'''simple docstring'''
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(a , self.unk_token)
def _lowerCAmelCase ( self: List[str] , a: int) ->List[str]:
'''simple docstring'''
a_ = []
a_ = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(a) + token
a_ = []
else:
current_sub_tokens.append(a)
out_string += self.sp_model.decode(a)
return out_string.strip()
def _lowerCAmelCase ( self: int , a: List[int] , a: Optional[List[int]] = None , a: bool = False) ->List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a , token_ids_a=a , already_has_special_tokens=a)
a_ = [1] * len(self.prefix_tokens)
a_ = [1] * len(self.suffix_tokens)
if token_ids_a is None:
return prefix_ones + ([0] * len(a)) + suffix_ones
return prefix_ones + ([0] * len(a)) + ([0] * len(a)) + suffix_ones
def _lowerCAmelCase ( self: Union[str, Any] , a: List[int] , a: Optional[List[int]] = None) ->List[int]:
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _lowerCAmelCase ( self: Dict) ->Dict:
'''simple docstring'''
a_ = {self.convert_ids_to_tokens(a): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self: List[str]) ->Dict:
'''simple docstring'''
a_ = self.__dict__.copy()
a_ = None
return state
def __setstate__( self: Optional[Any] , a: Dict) ->None:
'''simple docstring'''
a_ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs"):
a_ = {}
a_ = load_spm(self.spm_file , self.sp_model_kwargs)
def _lowerCAmelCase ( self: List[Any] , a: str , a: Optional[str] = None) ->Tuple[str]:
'''simple docstring'''
a_ = Path(a)
if not save_dir.is_dir():
raise OSError(f"""{save_directory} should be a directory""")
a_ = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
)
a_ = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
)
save_json(self.encoder , a)
if os.path.abspath(self.spm_file) != os.path.abspath(a) and os.path.isfile(self.spm_file):
copyfile(self.spm_file , a)
elif not os.path.isfile(self.spm_file):
with open(a , "wb") as fi:
a_ = self.sp_model.serialized_model_proto()
fi.write(a)
return (str(a), str(a))
def _lowerCAmelCase ( self: Dict , a: List[str] , a: str = "en" , a: Optional[List[str]] = None , a: str = "ro" , **a: List[Any] , ) ->BatchEncoding:
'''simple docstring'''
a_ = src_lang
a_ = tgt_lang
self.set_src_lang_special_tokens(self.src_lang)
return super().prepare_seqaseq_batch(a , a , **a)
def _lowerCAmelCase ( self: Optional[Any] , a: Any , a: Optional[str] , a: Optional[str] , **a: Tuple) ->List[str]:
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
a_ = src_lang
a_ = self(a , add_special_tokens=a , **a)
a_ = self.get_lang_id(a)
a_ = tgt_lang_id
return inputs
def _lowerCAmelCase ( self: Dict) ->int:
'''simple docstring'''
self.set_src_lang_special_tokens(self.src_lang)
def _lowerCAmelCase ( self: Optional[int]) ->Any:
'''simple docstring'''
self.set_tgt_lang_special_tokens(self.tgt_lang)
def _lowerCAmelCase ( self: str , a: str) ->None:
'''simple docstring'''
a_ = self.get_lang_token(a)
a_ = self.lang_token_to_id[lang_token]
a_ = [self.cur_lang_id]
a_ = [self.eos_token_id]
def _lowerCAmelCase ( self: Union[str, Any] , a: str) ->None:
'''simple docstring'''
a_ = self.get_lang_token(a)
a_ = self.lang_token_to_id[lang_token]
a_ = [self.cur_lang_id]
a_ = [self.eos_token_id]
def _lowerCAmelCase ( self: Tuple , a: str) ->str:
'''simple docstring'''
return self.lang_code_to_token[lang]
def _lowerCAmelCase ( self: Optional[int] , a: str) ->int:
'''simple docstring'''
a_ = self.get_lang_token(a)
return self.lang_token_to_id[lang_token]
def __UpperCAmelCase (lowercase__ ,lowercase__ ) -> sentencepiece.SentencePieceProcessor:
'''simple docstring'''
a_ = sentencepiece.SentencePieceProcessor(**lowercase__ )
spm.Load(str(lowercase__ ) )
return spm
def __UpperCAmelCase (lowercase__ ) -> Union[Dict, List]:
'''simple docstring'''
with open(lowercase__ ,"r" ) as f:
return json.load(lowercase__ )
def __UpperCAmelCase (lowercase__ ,lowercase__ ) -> None:
'''simple docstring'''
with open(lowercase__ ,"w" ) as f:
json.dump(lowercase__ ,lowercase__ ,indent=2 )
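
A minimal sketch of the language-token framing that the set_src_lang_special_tokens logic above applies: the encoded sentence is prefixed with its language token and suffixed with </s>. The ids below are invented for illustration and are not the real vocabulary:

LANG_TOKEN_ID = {"__en__": 128000, "__ro__": 128001}  # illustrative ids only
EOS_ID = 2


def build_inputs(token_ids: list, src_lang: str) -> list:
    # language token as prefix, end-of-sentence token as suffix
    return [LANG_TOKEN_ID[f"__{src_lang}__"]] + token_ids + [EOS_ID]


assert build_inputs([10, 11, 12], "en") == [128000, 10, 11, 12, 2]
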
| 685 |
'''simple docstring'''
import math
def __UpperCAmelCase (lowercase__ ) -> bool:
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 ,int(math.sqrt(lowercase__ ) + 1 ) ,6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __UpperCAmelCase (lowercase__ = 10001 ) -> int:
'''simple docstring'''
try:
a_ = int(lowercase__ )
except (TypeError, ValueError):
raise TypeError("Parameter nth must be int or castable to int." ) from None
if nth <= 0:
raise ValueError("Parameter nth must be greater than or equal to one." )
a_ = []
a_ = 2
while len(lowercase__ ) < nth:
if is_prime(lowercase__ ):
primes.append(lowercase__ )
num += 1
else:
num += 1
return primes[len(lowercase__ ) - 1]
if __name__ == "__main__":
print(F'{solution() = }')
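
A compact restatement of the same approach with readable names. One deliberate change: math.isqrt replaces int(math.sqrt(n) + 1), which avoids floating-point rounding for large n (this is a sketch, not the snippet's exact code):

import math


def is_prime(n: int) -> bool:
    if n < 2:
        return False
    if n < 4:
        return True  # 2 and 3
    if n % 2 == 0 or n % 3 == 0:
        return False
    # any prime p > 3 satisfies p % 6 in {1, 5}, so only 6k +/- 1 divisors matter
    for i in range(5, math.isqrt(n) + 1, 6):
        if n % i == 0 or n % (i + 2) == 0:
            return False
    return True


def nth_prime(n: int) -> int:
    count, candidate = 0, 1
    while count < n:
        candidate += 1
        if is_prime(candidate):
            count += 1
    return candidate


assert nth_prime(6) == 13
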
| 685 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a_ = {
'configuration_deberta': ['DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DebertaConfig', 'DebertaOnnxConfig'],
'tokenization_deberta': ['DebertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['DebertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'DebertaForMaskedLM',
'DebertaForQuestionAnswering',
'DebertaForSequenceClassification',
'DebertaForTokenClassification',
'DebertaModel',
'DebertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDebertaForMaskedLM',
'TFDebertaForQuestionAnswering',
'TFDebertaForSequenceClassification',
'TFDebertaForTokenClassification',
'TFDebertaModel',
'TFDebertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
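
_LazyModule defers every import declared in _import_structure until the attribute is first touched. A minimal sketch of the same idea using PEP 562 module-level __getattr__; the package layout and class names are illustrative, and _LazyModule itself handles much more (TYPE_CHECKING, optional backends):

# sketch: contents of a package __init__.py
import importlib

_IMPORT_STRUCTURE = {
    "configuration": ["MyConfig"],
    "modeling": ["MyModel"],
}
_ATTR_TO_MODULE = {attr: mod for mod, attrs in _IMPORT_STRUCTURE.items() for attr in attrs}


def __getattr__(name):
    # import the owning submodule on first access, then hand back the attribute
    if name in _ATTR_TO_MODULE:
        module = importlib.import_module(f".{_ATTR_TO_MODULE[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
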
| 685 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'uclanlp/visualbert-vqa': 'https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json',
'uclanlp/visualbert-vqa-pre': 'https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json',
'uclanlp/visualbert-vqa-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-vcr': 'https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json',
'uclanlp/visualbert-vcr-pre': 'https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json',
'uclanlp/visualbert-vcr-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-nlvr2': 'https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-pre': 'https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_UpperCAmelCase ='''visual_bert'''
def __init__( self: Union[str, Any] , a: List[Any]=3_05_22 , a: List[Any]=7_68 , a: Union[str, Any]=5_12 , a: List[str]=12 , a: Tuple=12 , a: Optional[Any]=30_72 , a: int="gelu" , a: Union[str, Any]=0.1 , a: int=0.1 , a: str=5_12 , a: Optional[int]=2 , a: List[str]=0.02 , a: Optional[int]=1e-12 , a: str=False , a: Any=True , a: Tuple=1 , a: Dict=0 , a: Any=2 , **a: Optional[Any] , ) ->str:
'''simple docstring'''
super().__init__(pad_token_id=a , bos_token_id=a , eos_token_id=a , **a)
a_ = vocab_size
a_ = max_position_embeddings
a_ = hidden_size
a_ = visual_embedding_dim
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = initializer_range
a_ = type_vocab_size
a_ = layer_norm_eps
a_ = bypass_transformer
a_ = special_visual_initialize
| 685 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
a_ = logging.get_logger(__name__)
a_ = {
'deepmind/language-perceiver': 'https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_UpperCAmelCase ='''perceiver'''
def __init__( self: Union[str, Any] , a: str=2_56 , a: Optional[int]=12_80 , a: Optional[Any]=7_68 , a: str=1 , a: int=26 , a: List[str]=8 , a: Any=8 , a: Dict=None , a: Dict=None , a: Optional[int]="kv" , a: Dict=1 , a: Optional[int]=1 , a: str="gelu" , a: Dict=0.1 , a: Optional[Any]=0.02 , a: Any=1e-12 , a: Dict=True , a: Optional[Any]=2_62 , a: str=20_48 , a: Any=56 , a: List[str]=[3_68, 4_96] , a: Optional[Any]=16 , a: List[str]=19_20 , a: Any=16 , a: Tuple=[1, 16, 2_24, 2_24] , **a: Dict , ) ->Optional[Any]:
'''simple docstring'''
super().__init__(**a)
a_ = num_latents
a_ = d_latents
a_ = d_model
a_ = num_blocks
a_ = num_self_attends_per_block
a_ = num_self_attention_heads
a_ = num_cross_attention_heads
a_ = qk_channels
a_ = v_channels
a_ = cross_attention_shape_for_attention
a_ = self_attention_widening_factor
a_ = cross_attention_widening_factor
a_ = hidden_act
a_ = attention_probs_dropout_prob
a_ = initializer_range
a_ = layer_norm_eps
a_ = use_query_residual
# masked language modeling attributes
a_ = vocab_size
a_ = max_position_embeddings
# image classification attributes
a_ = image_size
# flow attributes
a_ = train_size
# multimodal autoencoding attributes
a_ = num_frames
a_ = audio_samples_per_frame
a_ = samples_per_patch
a_ = output_shape
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
@property
def _lowerCAmelCase ( self: Any) ->Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
a_ = {0: "batch", 1: "choice", 2: "sequence"}
else:
a_ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("inputs", dynamic_axis),
("attention_mask", dynamic_axis),
])
@property
def _lowerCAmelCase ( self: Any) ->float:
'''simple docstring'''
return 1e-4
def _lowerCAmelCase ( self: List[Any] , a: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , a: int = -1 , a: int = -1 , a: int = -1 , a: bool = False , a: Optional[TensorType] = None , a: int = 3 , a: int = 40 , a: int = 40 , ) ->Mapping[str, Any]:
'''simple docstring'''
if isinstance(a , a):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
a_ = compute_effective_axis_dimension(
a , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0)
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
a_ = preprocessor.num_special_tokens_to_add(a)
a_ = compute_effective_axis_dimension(
a , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=a)
        # Generate dummy inputs according to the computed batch and sequence lengths
a_ = [" ".join(["a"]) * seq_length] * batch_size
a_ = dict(preprocessor(a , return_tensors=a))
a_ = inputs.pop("input_ids")
return inputs
elif isinstance(a , a) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
a_ = compute_effective_axis_dimension(a , fixed_dimension=OnnxConfig.default_fixed_batch)
a_ = self._generate_dummy_images(a , a , a , a)
a_ = dict(preprocessor(images=a , return_tensors=a))
a_ = inputs.pop("pixel_values")
return inputs
else:
raise ValueError(
"Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.")
| 685 |
'''simple docstring'''
from heapq import heappop, heappush
import numpy as np
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,) -> tuple[float | int, list[tuple[int, int]]]:
'''simple docstring'''
a_ , a_ = grid.shape
a_ = [-1, 1, 0, 0]
a_ = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
a_ , a_ = [(0, source)], set()
a_ = np.full((rows, cols) ,np.inf )
a_ = 0
a_ = np.empty((rows, cols) ,dtype=lowercase__ )
a_ = None
while queue:
((a_) , (a_)) = heappop(lowercase__ )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
a_ = []
while (x, y) != source:
path.append((x, y) )
a_ , a_ = predecessors[x, y]
path.append(lowercase__ ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(lowercase__ ) ):
a_ , a_ = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
a_ = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(lowercase__ ,(dist + 1, (nx, ny)) )
a_ = dist + 1
a_ = (x, y)
return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
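
The same heap discipline works on an ordinary adjacency list: push tentative distances and skip any popped entry whose node is already finalized (lazy deletion, like the visited check above). A minimal sketch with invented names:

from heapq import heappop, heappush


def dijkstra(graph: dict, source):
    # graph: node -> list of (neighbor, weight); returns shortest distances
    dist = {source: 0}
    heap = [(0, source)]
    done = set()
    while heap:
        d, u = heappop(heap)
        if u in done:  # stale heap entry, skip it
            continue
        done.add(u)
        for v, w in graph.get(u, []):
            if v not in dist or d + w < dist[v]:
                dist[v] = d + w
                heappush(heap, (d + w, v))
    return dist


graph = {"a": [("b", 1), ("c", 4)], "b": [("c", 2)], "c": []}
assert dijkstra(graph, "a") == {"a": 0, "b": 1, "c": 3}
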
| 685 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class SCREAMING_SNAKE_CASE__ :
_UpperCAmelCase =BlenderbotSmallConfig
_UpperCAmelCase ={}
_UpperCAmelCase ='''gelu'''
def __init__( self: List[str] , a: str , a: Tuple=13 , a: Any=7 , a: Union[str, Any]=True , a: str=False , a: str=99 , a: Union[str, Any]=32 , a: List[Any]=2 , a: Dict=4 , a: Optional[int]=37 , a: Optional[int]=0.1 , a: Any=0.1 , a: str=20 , a: Dict=2 , a: Optional[Any]=1 , a: Any=0 , ) ->Tuple:
'''simple docstring'''
a_ = parent
a_ = batch_size
a_ = seq_length
a_ = is_training
a_ = use_labels
a_ = vocab_size
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = eos_token_id
a_ = pad_token_id
a_ = bos_token_id
def _lowerCAmelCase ( self: Tuple) ->str:
'''simple docstring'''
a_ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size)
a_ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size) , 1)
a_ = tf.concat([input_ids, eos_tensor] , axis=1)
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
a_ = prepare_blenderbot_small_inputs_dict(a , a , a)
return config, inputs_dict
def _lowerCAmelCase ( self: Optional[int] , a: List[str] , a: Optional[int]) ->Optional[Any]:
'''simple docstring'''
a_ = TFBlenderbotSmallModel(config=a).get_decoder()
a_ = inputs_dict["input_ids"]
a_ = input_ids[:1, :]
a_ = inputs_dict["attention_mask"][:1, :]
a_ = inputs_dict["head_mask"]
a_ = 1
# first forward pass
a_ = model(a , attention_mask=a , head_mask=a , use_cache=a)
a_ , a_ = outputs.to_tuple()
        # create hypothetical next token and extend to next_input_ids
a_ = ids_tensor((self.batch_size, 3) , config.vocab_size)
a_ = tf.cast(ids_tensor((self.batch_size, 3) , 2) , tf.inta)
        # append to next input_ids and attention_mask
a_ = tf.concat([input_ids, next_tokens] , axis=-1)
a_ = tf.concat([attention_mask, next_attn_mask] , axis=-1)
a_ = model(a , attention_mask=a)[0]
a_ = model(a , attention_mask=a , past_key_values=a)[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1])
# select random slice
a_ = int(ids_tensor((1,) , output_from_past.shape[-1]))
a_ = output_from_no_past[:, -3:, random_slice_idx]
a_ = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(a , a , rtol=1e-3)
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ,lowercase__=None ,lowercase__=None ,lowercase__=None ,lowercase__=None ,lowercase__=None ,) -> str:
'''simple docstring'''
if attention_mask is None:
a_ = tf.cast(tf.math.not_equal(lowercase__ ,config.pad_token_id ) ,tf.inta )
if decoder_attention_mask is None:
a_ = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape ,dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ) ,tf.inta ),
] ,axis=-1 ,)
if head_mask is None:
a_ = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
a_ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
a_ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class SCREAMING_SNAKE_CASE__ ( lowercase_ , lowercase_ , unittest.TestCase ):
_UpperCAmelCase =(
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
_UpperCAmelCase =(TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
_UpperCAmelCase =(
{
'''conversational''': TFBlenderbotSmallForConditionalGeneration,
'''feature-extraction''': TFBlenderbotSmallModel,
'''summarization''': TFBlenderbotSmallForConditionalGeneration,
'''text2text-generation''': TFBlenderbotSmallForConditionalGeneration,
'''translation''': TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
_UpperCAmelCase =True
_UpperCAmelCase =False
_UpperCAmelCase =False
def _lowerCAmelCase ( self: List[str]) ->str:
'''simple docstring'''
a_ = TFBlenderbotSmallModelTester(self)
a_ = ConfigTester(self , config_class=a)
def _lowerCAmelCase ( self: Dict) ->str:
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self: str) ->List[Any]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*a)
@require_tokenizers
@require_tf
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
_UpperCAmelCase =[
'''Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like '''
''' i\'m going to throw up.\nand why is that?'''
]
_UpperCAmelCase ='''facebook/blenderbot_small-90M'''
@cached_property
def _lowerCAmelCase ( self: Union[str, Any]) ->Dict:
'''simple docstring'''
return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
@cached_property
def _lowerCAmelCase ( self: Optional[int]) ->Dict:
'''simple docstring'''
a_ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
return model
@slow
def _lowerCAmelCase ( self: str) ->Tuple:
'''simple docstring'''
a_ = self.tokenizer(self.src_text , return_tensors="tf")
a_ = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=a , )
a_ = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=a)[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 685 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    '''simple docstring'''
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])
    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]
    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1
    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])
    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2
    print(F"""Saving tokenizer to {pytorch_dump_folder_path}""")
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)
    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])
    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = F"""encoder.layer.{layer_index}.attention.self."""
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]
    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])
    model = LukeForMaskedLM(config=config).eval()
    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            state_dict_for_hugging_face["luke." + key] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]
    missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face, strict=False)
    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(F"""Unexpected unexpected_keys: {unexpected_keys}""")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(F"""Unexpected missing_keys: {missing_keys}""")
    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")
    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")
    outputs = model(**encoding)
    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]])
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            F"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""")
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError
    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            F"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
            F""" {expected_shape}""")
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError
    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")
    outputs = model(**encoding)
    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)
    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"
    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_original_entity_vocab(entity_vocab_path):
    '''simple docstring'''
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]
    data = [json.loads(line) for line in open(entity_vocab_path)]
    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[F"""{language}:{entity_name}"""] = entity_id
    return new_mapping
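# Illustrative input line for the entity vocab file (hypothetical values):
#   {"id": 3, "entities": [["Japan", "en"], ["[MASK]", "en"]]}
# Regular names are stored as "language:name" (e.g. "en:Japan" -> 3); an entity whose
# name is one of the special tokens keeps its bare name and ends processing of that entry.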
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
a_ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
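    # Example invocation (script name and all paths are illustrative placeholders):
    #   python convert_mluke_checkpoint.py \
    #       --checkpoint_path ./mluke/pytorch_model.bin \
    #       --metadata_path ./mluke/metadata.json \
    #       --entity_vocab_path ./mluke/entity_vocab.jsonl \
    #       --pytorch_dump_folder_path ./converted_mluke \
    #       --model_size base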
| 685 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'processing_wav2vec2_with_lm': ['Wav2Vec2ProcessorWithLM']}
if TYPE_CHECKING:
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
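# Note: with the lazy module installed in sys.modules, importing this package stays cheap;
# the heavy processing module is only loaded the first time Wav2Vec2ProcessorWithLM is accessed.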
| 685 |
'''simple docstring'''
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( lowercase_ , unittest.TestCase ):
_UpperCAmelCase =LxmertTokenizer
_UpperCAmelCase =LxmertTokenizerFast
_UpperCAmelCase =True
_UpperCAmelCase =True
def _lowerCAmelCase ( self: Dict) ->int:
'''simple docstring'''
super().setUp()
a_ = [
"[UNK]",
"[CLS]",
"[SEP]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
a_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
def _lowerCAmelCase ( self: Optional[Any] , a: Dict) ->Optional[Any]:
'''simple docstring'''
a_ = "UNwant\u00E9d,running"
a_ = "unwanted, running"
return input_text, output_text
    def _lowerCAmelCase ( self ):
        '''simple docstring'''
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
    def _lowerCAmelCase ( self ):
        '''simple docstring'''
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
| 685 | 1 |
'''simple docstring'''
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    '''simple docstring'''
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)
    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, sequence_length - len(tensor[:sequence_length]) :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, sequence_length - len(tensor[:sequence_length]) :] = tensor[:sequence_length]
    return out_tensor.tolist()
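# Illustrative behaviour (hypothetical call): padding_tensor([[1, 2], [3]], -1, "right", 4)
# right-pads each sequence with the padding value up to sequence_length, giving
# [[1, 2, -1, -1], [3, -1, -1, -1]]; a tuple padding value pads (start, end) span pairs instead.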
def __UpperCAmelCase (lowercase__ ) -> bool:
    '''simple docstring'''
    cp = ord(lowercase__ )
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(lowercase__ )
    if cat.startswith("P" ):
        return True
    return False
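# Sanity check covering both branches above: ASCII punctuation such as "," falls in the
# explicit codepoint ranges, while e.g. the CJK full stop "。" is caught by the "P" category.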
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin ):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"
    def _lowerCAmelCase ( self, features ):
        '''simple docstring'''
        import torch
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt" if labels is None else None, )
        if labels is None:
            return batch
        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]
        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}
        return batch
| 685 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    'configuration_cpmant': ['CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CpmAntConfig'],
    'tokenization_cpmant': ['CpmAntTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_cpmant'] = [
        'CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'CpmAntForCausalLM',
        'CpmAntModel',
        'CpmAntPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 685 | 1 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
a_ = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}, )
    use_fast_tokenizer: bool = field(
        default=True, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'}, )
    model_revision: str = field(
        default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'}, )
    use_auth_token: bool = field(
        default=False, metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        }, )
@dataclass
class DataTrainingArguments:
    train_file: Optional[str] = field(default=None, metadata={'help': 'The input training data file (a text file).'} )
    validation_file: Optional[str] = field(
        default=None, metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'}, )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'} )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={'help': 'The number of processes to use for the preprocessing.'}, )
    max_seq_length: Optional[int] = field(
        default=None, metadata={
            'help': (
                'The maximum total input sequence length after tokenization. If passed, sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        }, )
    pad_to_max_length: bool = field(
        default=False, metadata={
            'help': (
                'Whether to pad all samples to the maximum sentence length. '
                'If False, will pad the samples dynamically when batching to the maximum length in the batch. More '
                'efficient on GPU but very bad for TPU.'
            )
        }, )
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        }, )
    max_eval_samples: Optional[int] = field(
        default=None, metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        }, )
    def _lowerCAmelCase ( self ):
        '''simple docstring'''
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    def __call__( self, features ):
        '''simple docstring'''
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))
        batch = self.tokenizer.pad(
            flattened_features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt", )
        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
def main():
'''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_swag" ,lowercase__ ,lowercase__ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" ,datefmt="%m/%d/%Y %H:%M:%S" ,handlers=[logging.StreamHandler(sys.stdout )] ,)
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    datasets.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, """
        + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split("." )[-1]
        raw_datasets = load_dataset(
            extension, data_files=data_files, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag", "regular", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path ), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
a_ = [F"""ending{i}""" for i in range(4 )]
a_ = "sent1"
a_ = "sent2"
    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`." )
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
                F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length )
# Preprocessing the datasets.
    def preprocess_function(examples ):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [F"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(question_headers )
        ]
        # Flatten out
        first_sentences = list(chain(*first_sentences ) )
        second_sentences = list(chain(*second_sentences ) )
        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences, second_sentences, truncation=True, max_length=max_seq_length, padding="max_length" if data_args.pad_to_max_length else False, )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v ), 4 )] for k, v in tokenized_examples.items()}
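    # Shape sketch: a batch of B examples becomes 4*B (context, ending) pairs for the
    # tokenizer; the final comprehension regroups every 4 consecutive rows so each
    # example keeps its 4 candidate endings together.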
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset" )
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset ), data_args.max_train_samples )
            train_dataset = train_dataset.select(range(max_train_samples ) )
        with training_args.main_process_first(desc="train dataset map pre-processing" ):
            train_dataset = train_dataset.map(
                preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, )
    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset" )
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset ), data_args.max_eval_samples )
            eval_dataset = eval_dataset.select(range(max_eval_samples ) )
        with training_args.main_process_first(desc="validation dataset map pre-processing" ):
            eval_dataset = eval_dataset.map(
                preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, )
    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None )
    )
# Metric
    def compute_metrics(eval_predictions ):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1 )
        return {"accuracy": (preds == label_ids).astype(np.float32 ).mean().item()}
# Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics, )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset )
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset ) )
        trainer.log_metrics("train", metrics )
        trainer.save_metrics("train", metrics )
        trainer.save_state()
# Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***" )
        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset )
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset ) )
        trainer.log_metrics("eval", metrics )
        trainer.save_metrics("eval", metrics )
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
def _mp_fn(lowercase__ ):
    '''simple docstring'''
    main()
if __name__ == "__main__":
main()
| 685 |
'''simple docstring'''
import re
def is_sri_lankan_phone_number(lowercase__ ) -> bool:
    '''simple docstring'''
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$" )
    return bool(re.search(pattern, lowercase__ ) )
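# Pattern breakdown: an optional prefix (0, 94, +94 or 0094), the mobile prefix 7x with
# x in {0, 1, 2, 4, 5, 6, 7, 8}, an optional separator ("-" or a space), then 7 more digits.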
if __name__ == "__main__":
    phone = '0094702343221'
print(is_sri_lankan_phone_number(phone))
| 685 | 1 |
'''simple docstring'''
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    '''simple docstring'''
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]" )
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers" )
    # Register commands
    ConvertCommand.register_subcommand(commands_parser )
    DownloadCommand.register_subcommand(commands_parser )
    EnvironmentCommand.register_subcommand(commands_parser )
    RunCommand.register_subcommand(commands_parser )
    ServeCommand.register_subcommand(commands_parser )
    UserCommands.register_subcommand(commands_parser )
    AddNewModelCommand.register_subcommand(commands_parser )
    AddNewModelLikeCommand.register_subcommand(commands_parser )
    LfsCommands.register_subcommand(commands_parser )
    PTtoTFCommand.register_subcommand(commands_parser )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args, "func" ):
        parser.print_help()
        exit(1 )
    # Run
    service = args.func(args )
    service.run()
if __name__ == "__main__":
main()
| 685 |
'''simple docstring'''
import argparse
import os
import re
PATH_TO_AUTO_MODULE = 'src/transformers/models/auto'
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')
def sort_auto_mapping(fname, overwrite = False ):
    '''simple docstring'''
    with open(fname, "r", encoding="utf-8" ) as f:
        content = f.read()
    lines = content.split("\n" )
    new_lines = []
    line_idx = 0
    while line_idx < len(lines ):
        if _re_intro_mapping.search(lines[line_idx] ) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx] ).groups()[0] ) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "(" ):
                new_lines.append(lines[line_idx] )
                line_idx += 1
            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")" ):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1] ) )
                else:
                    blocks.append(lines[line_idx] )
                line_idx += 1
            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda block : _re_identifier.search(block ).groups()[0] )
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx] )
            line_idx += 1
    if overwrite:
        with open(fname, "w", encoding="utf-8" ) as f:
            f.write("\n".join(new_lines ) )
    elif "\n".join(new_lines ) != content:
        return True
def sort_all_auto_mappings(overwrite = False ):
    '''simple docstring'''
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f ) for f in os.listdir(PATH_TO_AUTO_MODULE ) if f.endswith(".py" )]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite ) for fname in fnames]
    if not overwrite and any(diffs ):
        failures = [f for f, d in zip(fnames, diffs ) if d]
        raise ValueError(
            F"""The following files have auto mappings that need sorting: {', '.join(failures )}. Run `make style` to fix"""
            " this." )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
a_ = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
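# Typical usage (script location is illustrative; run from the repository root):
#   python utils/sort_auto_mappings.py              # sort the mappings in place
#   python utils/sort_auto_mappings.py --check_only # only report files needing sorting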
| 685 | 1 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class SCREAMING_SNAKE_CASE__ :
@staticmethod
    def _lowerCAmelCase ( *args, **kwargs ):
'''simple docstring'''
pass
def __UpperCAmelCase (lowercase__ ) -> List[str]:
'''simple docstring'''
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
a_ = (
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
_UpperCAmelCase =MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
    def _lowerCAmelCase ( self, model, tokenizer, processor ):
        '''simple docstring'''
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor)
        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples
    def _lowerCAmelCase ( self, dqa_pipeline, examples ):
        '''simple docstring'''
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs, [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3, )
@require_torch
@require_detectrona
@require_pytesseract
    def _lowerCAmelCase ( self ):
        '''simple docstring'''
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"
        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)
        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)
        # This image contains no detectable text, so layoutlmv2 should fail and
        # return an empty answer.
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])
        # We can optionally pass the words and bounding boxes directly
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])
@slow
@require_torch
@require_detectrona
@require_pytesseract
def _lowerCAmelCase ( self: Any) ->List[Any]:
'''simple docstring'''
a_ = pipeline(
"document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , )
a_ = INVOICE_URL
a_ = "What is the invoice number?"
a_ = dqa_pipeline(image=a , question=a , top_k=2)
self.assertEqual(
nested_simplify(a , decimals=4) , [
{"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
] , )
a_ = dqa_pipeline({"image": image, "question": question} , top_k=2)
self.assertEqual(
nested_simplify(a , decimals=4) , [
{"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
] , )
a_ = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2)
self.assertEqual(
nested_simplify(a , decimals=4) , [
[
{"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
],
]
* 2 , )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def _lowerCAmelCase ( self: Optional[int]) ->Optional[Any]:
'''simple docstring'''
a_ = pipeline(
"document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , max_seq_len=50 , )
a_ = INVOICE_URL
a_ = "What is the invoice number?"
a_ = dqa_pipeline(image=a , question=a , top_k=2)
self.assertEqual(
nested_simplify(a , decimals=4) , [
{"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
] , )
a_ = dqa_pipeline({"image": image, "question": question} , top_k=2)
self.assertEqual(
nested_simplify(a , decimals=4) , [
{"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
] , )
a_ = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2)
self.assertEqual(
nested_simplify(a , decimals=4) , [
[
{"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def _lowerCAmelCase ( self: Optional[int]) ->Optional[Any]:
'''simple docstring'''
a_ = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=a)
a_ = pipeline(
"document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=a , revision="3dc6de3" , )
a_ = INVOICE_URL
a_ = "What is the invoice number?"
a_ = dqa_pipeline(image=a , question=a , top_k=2)
self.assertEqual(
nested_simplify(a , decimals=4) , [
{"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
] , )
a_ = dqa_pipeline({"image": image, "question": question} , top_k=2)
self.assertEqual(
nested_simplify(a , decimals=4) , [
{"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
] , )
a_ = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2)
self.assertEqual(
nested_simplify(a , decimals=4) , [
[
{"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
]
]
* 2 , )
a_ = list(zip(*apply_tesseract(load_image(a) , a , "")))
# This model should also work if `image` is set to None
a_ = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2)
self.assertEqual(
nested_simplify(a , decimals=4) , [
{"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def _lowerCAmelCase ( self: List[Any]) ->Dict:
'''simple docstring'''
a_ = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=a)
a_ = pipeline(
"document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=a , revision="3dc6de3" , max_seq_len=50 , )
a_ = INVOICE_URL
a_ = "What is the invoice number?"
a_ = dqa_pipeline(image=a , question=a , top_k=2)
self.assertEqual(
nested_simplify(a , decimals=4) , [
{"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
] , )
a_ = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2)
self.assertEqual(
nested_simplify(a , decimals=4) , [
[
{"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
]
]
* 2 , )
a_ = list(zip(*apply_tesseract(load_image(a) , a , "")))
# This model should also work if `image` is set to None
a_ = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2)
self.assertEqual(
nested_simplify(a , decimals=4) , [
{"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
] , )
@slow
@require_torch
def _lowerCAmelCase ( self: str) ->List[str]:
'''simple docstring'''
a_ = pipeline(
"document-question-answering" , model="naver-clova-ix/donut-base-finetuned-docvqa" , tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa") , feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa" , )
a_ = INVOICE_URL
a_ = "What is the invoice number?"
a_ = dqa_pipeline(image=a , question=a , top_k=2)
self.assertEqual(nested_simplify(a , decimals=4) , [{"answer": "us-001"}])
@require_tf
@unittest.skip("Document question answering not implemented in TF")
def _lowerCAmelCase ( self: Dict) ->List[str]:
'''simple docstring'''
pass
| 685 |
'''simple docstring'''
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class SCREAMING_SNAKE_CASE__ ( lowercase_ , lowercase_ ):
_UpperCAmelCase ='''pixel_values'''
_UpperCAmelCase =False
_UpperCAmelCase =TimmBackboneConfig
    def __init__( self, config, **kwargs ):
        '''simple docstring'''
        requires_backends(self, "timm")
        super().__init__(config)
        self.config = config
        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name.")
        if config.backbone not in timm.list_models():
            raise ValueError(f"""backbone {config.backbone} is not supported by timm.""")
        if hasattr(config, "out_features") and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")
        pretrained = getattr(config, "use_pretrained_backbone", None)
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")
        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,)
        self._backbone = timm.create_model(
            config.backbone, pretrained=pretrained, features_only=config.features_only, in_chans=config.num_channels, out_indices=out_indices, **kwargs, )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)
@classmethod
    def _lowerCAmelCase ( cls, pretrained_model_name_or_path, *model_args, **kwargs ):
        '''simple docstring'''
        requires_backends(cls, ["vision", "timm"])
        from ...models.timm_backbone import TimmBackboneConfig
        config = kwargs.pop("config", TimmBackboneConfig())
        use_timm = kwargs.pop("use_timm_backbone", True)
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones")
        num_channels = kwargs.pop("num_channels", config.num_channels)
        features_only = kwargs.pop("features_only", config.features_only)
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
        out_indices = kwargs.pop("out_indices", config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path, num_channels=num_channels, features_only=features_only, use_pretrained_backbone=use_pretrained_backbone, out_indices=out_indices, )
        return super()._from_config(config, **kwargs)
def _lowerCAmelCase ( self: Optional[Any] , a: Optional[int]) ->str:
'''simple docstring'''
pass
    def _lowerCAmelCase ( self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs ) ->Union[BackboneOutput, Tuple[Tensor, ...]]:
        '''simple docstring'''
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment")
        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None
        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output
        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
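# Minimal usage sketch (assumes timm is installed and "resnet18" is a valid timm model;
# `backbone_cls` stands for the backbone class defined above):
#   config = TimmBackboneConfig(backbone="resnet18", use_pretrained_backbone=False, out_indices=(1, 2, 3, 4))
#   backbone = backbone_cls(config)
#   outputs = backbone(pixel_values)  # BackboneOutput with one feature map per out_indices entry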
| 685 | 1 |
'''simple docstring'''
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path ):
    '''simple docstring'''
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu" )
    state_dict = chkpt["model"]
    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v
    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray) )}
    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@" ) == -1 and i > 13 else s.replace("@@", "" ): i for s, i in vocab.items()}
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]
    print(F"""Save PyTorch model to {pytorch_weights_dump_path}""" )
    torch.save(two_levels_state_dict, pytorch_weights_dump_path )
    print(F"""Save configuration file to {pytorch_config_dump_path}""" )
    with open(pytorch_config_dump_path, "w", encoding="utf-8" ) as f:
        f.write(json.dumps(config, indent=2 ) + "\n" )
    print(F"""Save vocab file to {pytorch_vocab_dump_path}""" )
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8" ) as f:
        f.write(json.dumps(vocab, indent=2 ) + "\n" )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xlm_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
a_ = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
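    # Example invocation (paths are illustrative placeholders):
    #   python convert_xlm_original_pytorch_checkpoint_to_pytorch.py \
    #       --xlm_checkpoint_path ./xlm/model.pth \
    #       --pytorch_dump_folder_path ./converted_xlm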
| 685 |
'''simple docstring'''
class OverFlowError(Exception ):
    pass
class UnderFlowError(Exception ):
    pass
class SCREAMING_SNAKE_CASE__ :
def __init__( self: Optional[Any]) ->List[str]:
'''simple docstring'''
a_ = [
[],
[],
[],
]
    def _lowerCAmelCase ( self, priority: int, data: int) ->None:
        '''simple docstring'''
        try:
            if len(self.queues[priority]) >= 1_00:
                raise OverFlowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")
def _lowerCAmelCase ( self: Union[str, Any]) ->int:
'''simple docstring'''
for queue in self.queues:
if queue:
return queue.pop(0)
raise UnderFlowError("All queues are empty")
def __str__( self: Dict) ->str:
'''simple docstring'''
return "\n".join(f"""Priority {i}: {q}""" for i, q in enumerate(self.queues))
class SCREAMING_SNAKE_CASE__ :
def __init__( self: Any) ->List[str]:
'''simple docstring'''
a_ = []
def _lowerCAmelCase ( self: int , a: int) ->None:
'''simple docstring'''
if len(self.queue) == 1_00:
raise OverFlowError("Maximum queue size is 100")
self.queue.append(a)
def _lowerCAmelCase ( self: List[str]) ->int:
'''simple docstring'''
if not self.queue:
raise UnderFlowError("The queue is empty")
else:
a_ = min(self.queue)
self.queue.remove(a)
return data
def __str__( self: Optional[int]) ->str:
'''simple docstring'''
return str(self.queue)
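# Semantics recap: FixedPriorityQueue dequeues FIFO from the lowest-numbered non-empty
# priority (0 before 1 before 2); ElementPriorityQueue ignores insertion order and
# always removes the smallest element currently in the queue.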
def __UpperCAmelCase () -> Union[str, Any]:
'''simple docstring'''
a_ = FixedPriorityQueue()
fpq.enqueue(0 ,10 )
fpq.enqueue(1 ,70 )
fpq.enqueue(0 ,100 )
fpq.enqueue(2 ,1 )
fpq.enqueue(2 ,5 )
fpq.enqueue(1 ,7 )
fpq.enqueue(2 ,4 )
fpq.enqueue(1 ,64 )
fpq.enqueue(0 ,128 )
print(lowercase__ )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(lowercase__ )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def __UpperCAmelCase () -> List[Any]:
'''simple docstring'''
a_ = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(100 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(128 )
print(lowercase__ )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(lowercase__ )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 685 | 1 |
'''simple docstring'''
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_UpperCAmelCase ='''new-model'''
if is_tf_available():
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_UpperCAmelCase =NewModelConfig
@require_tf
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
def _lowerCAmelCase ( self: Dict) ->Any:
'''simple docstring'''
a_ = "bert-base-cased"
a_ = AutoConfig.from_pretrained(a)
self.assertIsNotNone(a)
self.assertIsInstance(a , a)
a_ = TFAutoModel.from_pretrained(a)
self.assertIsNotNone(a)
self.assertIsInstance(a , a)
@slow
def _lowerCAmelCase ( self: Union[str, Any]) ->int:
'''simple docstring'''
a_ = "bert-base-cased"
a_ = AutoConfig.from_pretrained(a)
self.assertIsNotNone(a)
self.assertIsInstance(a , a)
a_ = TFAutoModelForPreTraining.from_pretrained(a)
self.assertIsNotNone(a)
self.assertIsInstance(a , a)
@slow
def _lowerCAmelCase ( self: int) ->Dict:
'''simple docstring'''
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = AutoConfig.from_pretrained(a)
self.assertIsNotNone(a)
self.assertIsInstance(a , a)
a_ = TFAutoModelForCausalLM.from_pretrained(a)
a_ , a_ = TFAutoModelForCausalLM.from_pretrained(a , output_loading_info=a)
self.assertIsNotNone(a)
self.assertIsInstance(a , a)
@slow
def _lowerCAmelCase ( self: str) ->List[Any]:
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = AutoConfig.from_pretrained(a)
self.assertIsNotNone(a)
self.assertIsInstance(a , a)
a_ = TFAutoModelWithLMHead.from_pretrained(a)
self.assertIsNotNone(a)
self.assertIsInstance(a , a)
@slow
def _lowerCAmelCase ( self: str) ->Union[str, Any]:
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = AutoConfig.from_pretrained(a)
self.assertIsNotNone(a)
self.assertIsInstance(a , a)
a_ = TFAutoModelForMaskedLM.from_pretrained(a)
a_ , a_ = TFAutoModelForMaskedLM.from_pretrained(a , output_loading_info=a)
self.assertIsNotNone(a)
self.assertIsInstance(a , a)
@slow
def _lowerCAmelCase ( self: Any) ->Dict:
'''simple docstring'''
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = AutoConfig.from_pretrained(a)
self.assertIsNotNone(a)
self.assertIsInstance(a , a)
a_ = TFAutoModelForSeqaSeqLM.from_pretrained(a)
a_ , a_ = TFAutoModelForSeqaSeqLM.from_pretrained(a , output_loading_info=a)
self.assertIsNotNone(a)
self.assertIsInstance(a , a)
@slow
def _lowerCAmelCase ( self: Union[str, Any]) ->Optional[int]:
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
a_ = AutoConfig.from_pretrained(a)
self.assertIsNotNone(a)
self.assertIsInstance(a , a)
a_ = TFAutoModelForSequenceClassification.from_pretrained(a)
self.assertIsNotNone(a)
self.assertIsInstance(a , a)
@slow
def _lowerCAmelCase ( self: Tuple) ->List[Any]:
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
a_ = AutoConfig.from_pretrained(a)
self.assertIsNotNone(a)
self.assertIsInstance(a , a)
a_ = TFAutoModelForQuestionAnswering.from_pretrained(a)
self.assertIsNotNone(a)
self.assertIsInstance(a , a)
@slow
@require_tensorflow_probability
def _lowerCAmelCase ( self: Any) ->Dict:
'''simple docstring'''
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
a_ = AutoConfig.from_pretrained(a)
self.assertIsNotNone(a)
self.assertIsInstance(a , a)
a_ = TFAutoModelForTableQuestionAnswering.from_pretrained(a)
a_ , a_ = TFAutoModelForTableQuestionAnswering.from_pretrained(
a , output_loading_info=a)
self.assertIsNotNone(a)
self.assertIsInstance(a , a)
    def _lowerCAmelCase ( self ):
        '''simple docstring'''
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 1_44_10)
        self.assertEqual(model.num_parameters(only_trainable=True), 1_44_10)
    def _lowerCAmelCase ( self ):
        '''simple docstring'''
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 1_44_10)
        self.assertEqual(model.num_parameters(only_trainable=True), 1_44_10)
    def _lowerCAmelCase ( self ):
        '''simple docstring'''
        model = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny")
        self.assertIsInstance(model, TFFunnelModel)
        config = copy.deepcopy(model.config)
        config.architectures = ["FunnelBaseModel"]
        model = TFAutoModel.from_config(config)
        self.assertIsInstance(model, TFFunnelBaseModel)
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir)
            model = TFAutoModel.from_pretrained(tmp_dir)
            self.assertIsInstance(model, TFFunnelBaseModel)
def _lowerCAmelCase ( self: Any) ->Tuple:
'''simple docstring'''
try:
AutoConfig.register("new-model" , a)
            auto_classes = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__):
# Wrong config class will raise an error
with self.assertRaises(a):
auto_class.register(a , a)
auto_class.register(a , a)
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(a):
auto_class.register(a , a)
# Now that the config is registered, it can be used as any other config with the auto-API
a_ = BertModelTester(self).get_config()
a_ = NewModelConfig(**tiny_config.to_dict())
a_ = auto_class.from_config(a)
self.assertIsInstance(a , a)
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(a)
a_ = auto_class.from_pretrained(a)
self.assertIsInstance(a , a)
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def _lowerCAmelCase ( self: int) ->Optional[int]:
'''simple docstring'''
with self.assertRaisesRegex(
a , "bert-base is not a local folder and is not a valid model identifier"):
a_ = TFAutoModel.from_pretrained("bert-base")
def _lowerCAmelCase ( self: Tuple) ->Dict:
'''simple docstring'''
with self.assertRaisesRegex(
a , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"):
a_ = TFAutoModel.from_pretrained(a , revision="aaaaaa")
def _lowerCAmelCase ( self: List[str]) ->Optional[Any]:
'''simple docstring'''
with self.assertRaisesRegex(
a , "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin" , ):
a_ = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model")
def _lowerCAmelCase ( self: str) ->List[str]:
'''simple docstring'''
with self.assertRaisesRegex(a , "Use `from_pt=True` to load this model"):
a_ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
def _lowerCAmelCase ( self: int) ->List[str]:
'''simple docstring'''
a_ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
with RequestCounter() as counter:
a_ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
self.assertEqual(counter.get_request_count , 0)
self.assertEqual(counter.head_request_count , 1)
self.assertEqual(counter.other_request_count , 0)
# With a sharded checkpoint
a_ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
with RequestCounter() as counter:
a_ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
self.assertEqual(counter.get_request_count , 0)
self.assertEqual(counter.head_request_count , 1)
self.assertEqual(counter.other_request_count , 0)
| 685 |
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def __UpperCAmelCase () -> Optional[Any]:
'''simple docstring'''
a_ = {
"repo_name": ["test_repo1", "test_repo2", "test_repo3"],
"path": ["test_1.py", "test_2.py", "unit_test.py"],
"content": ["a " * 20, "a " * 30, "b " * 7],
}
a_ = Dataset.from_dict(lowercase__ )
return dataset
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
def _lowerCAmelCase ( self: Union[str, Any]) ->Optional[int]:
'''simple docstring'''
a_ = get_dataset()
a_ = make_duplicate_clusters(a , 0.85)
self.assertEqual(len(duplicate_clusters[0]) , 2)
def _lowerCAmelCase ( self: Any) ->Dict:
'''simple docstring'''
a_ = get_dataset()
a_ , a_ = deduplicate_dataset(a)
self.assertEqual(len(a) , 2)
print(a)
self.assertEqual(duplicate_clusters[0][0]["copies"] , 2)
self.assertEqual(duplicate_clusters[0][0]["is_extreme"] , a)
| 685 | 1 |
'''simple docstring'''
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
a_ = '<<<<<<< This should probably be modified because it mentions: '
a_ = '=======\n>>>>>>>\n'
a_ = [
'TextEncoderConfig',
'ByteTextEncoder',
'SubwordTextEncoder',
'encoder_config',
'maybe_build_from_corpus',
'manual_dir',
]
a_ = [
# (pattern, replacement)
# Order is important here for some replacements
(r'tfds\.core', r'datasets'),
(r'tf\.io\.gfile\.GFile', r'open'),
(r'tf\.([\w\d]+)', r'datasets.Value(\'\1\')'),
(r'tfds\.features\.Text\(\)', r'datasets.Value(\'string\')'),
(r'tfds\.features\.Text\(', r'datasets.Value(\'string\'),'),
(r'features\s*=\s*tfds.features.FeaturesDict\(', r'features=datasets.Features('),
(r'tfds\.features\.FeaturesDict\(', r'dict('),
(r'The TensorFlow Datasets Authors', r'The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'),
(r'tfds\.', r'datasets.'),
(r'dl_manager\.manual_dir', r'self.config.data_dir'),
(r'self\.builder_config', r'self.config'),
]
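# Hedged sketch (assuming the list above is bound to the TO_CONVERT name used
# later in ConvertCommand.run): the (pattern, replacement) pairs are applied
# in order with re.sub, so a TFDS feature declaration becomes its datasets
# equivalent:
#
#     line = "tfds.features.Text()"
#     for pattern, replacement in TO_CONVERT:
#         line = re.sub(pattern, replacement, line)
#     # line == "datasets.Value('string')"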
def __UpperCAmelCase (lowercase__ ) -> List[str]:
'''simple docstring'''
return ConvertCommand(args.tfds_path ,args.datasets_directory )
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
@staticmethod
def _lowerCAmelCase ( a: ArgumentParser) ->Tuple:
'''simple docstring'''
a_ = parser.add_parser(
"convert" , help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset." , )
train_parser.add_argument(
"--tfds_path" , type=a , required=a , help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert." , )
train_parser.add_argument(
"--datasets_directory" , type=a , required=a , help="Path to the HuggingFace Datasets folder.")
train_parser.set_defaults(func=a)
def __init__( self: Optional[Any] , a: str , a: str , *a: Tuple) ->List[Any]:
'''simple docstring'''
a_ = get_logger("datasets-cli/converting")
a_ = tfds_path
a_ = datasets_directory
def _lowerCAmelCase ( self: Dict) ->Union[str, Any]:
'''simple docstring'''
if os.path.isdir(self._tfds_path):
a_ = os.path.abspath(self._tfds_path)
elif os.path.isfile(self._tfds_path):
a_ = os.path.dirname(self._tfds_path)
else:
raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")
a_ = os.path.abspath(self._datasets_directory)
self._logger.info(f"""Converting datasets from {abs_tfds_path} to {abs_datasets_path}""")
a_ = []
a_ = []
a_ = {}
if os.path.isdir(self._tfds_path):
a_ = os.listdir(a)
else:
a_ = [os.path.basename(self._tfds_path)]
for f_name in file_names:
self._logger.info(f"""Looking at file {f_name}""")
a_ = os.path.join(a , a)
a_ = os.path.join(a , a)
if not os.path.isfile(a) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info("Skipping file")
continue
with open(a , encoding="utf-8") as f:
a_ = f.readlines()
a_ = []
a_ = False
a_ = False
a_ = []
for line in lines:
a_ = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
a_ = "import datasets\n"
elif "import tensorflow" in out_line:
# order is important here
a_ = ""
continue
elif "from absl import logging" in out_line:
a_ = "from datasets import logging\n"
elif "getLogger" in out_line:
a_ = out_line.replace("getLogger" , "get_logger")
elif any(expression in out_line for expression in TO_HIGHLIGHT):
a_ = True
                    a_ = list(filter(lambda e: e in out_line , TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(a_) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
continue
else:
for pattern, replacement in TO_CONVERT:
a_ = re.sub(a , a , a)
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
a_ = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)" , a)
tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
a_ = "from . import " + match.group(1)
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(f"""Error converting {out_line.strip()}""")
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
a_ = True
out_lines.append(a)
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
a_ = f_name.replace(".py" , "")
a_ = os.path.join(a , a)
a_ = os.path.join(a , a)
os.makedirs(a , exist_ok=a)
self._logger.info(f"""Adding directory {output_dir}""")
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
else:
# Utilities will be moved at the end
utils_files.append(a)
if needs_manual_update:
with_manual_update.append(a)
with open(a , "w" , encoding="utf-8") as f:
f.writelines(a)
self._logger.info(f"""Converted in {output_file}""")
for utils_file in utils_files:
try:
a_ = os.path.basename(a)
a_ = imports_to_builder_map[f_name.replace(".py" , "")]
self._logger.info(f"""Moving {dest_folder} to {utils_file}""")
shutil.copy(a , a)
except KeyError:
self._logger.error(f"""Cannot find destination folder for {utils_file}. Please copy manually.""")
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
f"""You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'.""")
| 685 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
a_ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
def __init__( self: List[Any] , *a: str , **a: Tuple) ->None:
'''simple docstring'''
warnings.warn(
"The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use DonutImageProcessor instead." , a , )
super().__init__(*a , **a)
| 685 | 1 |
'''simple docstring'''
def __UpperCAmelCase (lowercase__ ) -> list[int]:
'''simple docstring'''
a_ = len(lowercase__ )
for i in range(lowercase__ ):
for j in range(i + 1 ,lowercase__ ):
if numbers[j] < numbers[i]:
a_ , a_ = numbers[j], numbers[i]
return numbers
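# Hedged doctest-style examples for the exchange sort above (referenced as
# `exchange_sort` in the __main__ block below); it sorts the list in place
# with O(n^2) comparisons and returns the same object:
#
#     exchange_sort([5, 3, 1, 4, 2])  # -> [1, 2, 3, 4, 5]
#     exchange_sort([-1, 0, -1])      # -> [-1, -1, 0]
#     exchange_sort([])               # -> []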
if __name__ == "__main__":
a_ = input('Enter numbers separated by a comma:\n').strip()
a_ = [int(item) for item in user_input.split(',')]
print(exchange_sort(unsorted))
| 685 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
a_ = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def _lowerCAmelCase ( self: Any , a: Path , a: Union[str, None] = None , a: Union[List[str], None] = None , a: Union[str, List[str], None] = None , a: bool = True , ) ->Optional[Any]:
'''simple docstring'''
a_ = [file for file in os.listdir(a) if os.path.isfile(os.path.join(a , a))]
if identifier is not None:
a_ = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(a , a):
for n_ in n_identifier:
a_ = [file for file in files if n_ not in file]
else:
a_ = [file for file in files if n_identifier not in file]
a_ = ignore_files or []
ignore_files.append("__init__.py")
a_ = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print("Testing" , a)
if only_modules:
a_ = file.split(".")[0]
try:
a_ = getattr(a , a)
a_ = doctest.DocTestSuite(a)
a_ = unittest.TextTestRunner().run(a)
self.assertIs(len(result.failures) , 0)
except AttributeError:
logger.info(f"""{module_identifier} is not a module.""")
else:
a_ = doctest.testfile(str(".." / directory / file) , optionflags=doctest.ELLIPSIS)
self.assertIs(result.failed , 0)
def _lowerCAmelCase ( self: Dict) ->Tuple:
'''simple docstring'''
a_ = Path("src/transformers")
a_ = "modeling"
a_ = [
"modeling_ctrl.py",
"modeling_tf_ctrl.py",
]
self.analyze_directory(a , identifier=a , ignore_files=a)
def _lowerCAmelCase ( self: int) ->Dict:
'''simple docstring'''
a_ = Path("src/transformers")
a_ = "tokenization"
self.analyze_directory(a , identifier=a)
def _lowerCAmelCase ( self: List[Any]) ->Optional[int]:
'''simple docstring'''
a_ = Path("src/transformers")
a_ = "configuration"
self.analyze_directory(a , identifier=a)
def _lowerCAmelCase ( self: Union[str, Any]) ->Any:
'''simple docstring'''
a_ = Path("src/transformers")
a_ = ["configuration", "modeling", "tokenization"]
self.analyze_directory(a , n_identifier=a)
def _lowerCAmelCase ( self: Optional[int]) ->Tuple:
'''simple docstring'''
a_ = Path("docs/source")
a_ = ["favicon.ico"]
self.analyze_directory(a , ignore_files=a , only_modules=a)
| 685 | 1 |
'''simple docstring'''
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ) -> Union[str, Any]:
'''simple docstring'''
a_ = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, nicht wahr?",
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
a_ = {
"wmt16-en-de-dist-12-1": [28.3, 27.52],
"wmt16-en-de-dist-6-1": [27.4, 27.11],
"wmt16-en-de-12-1": [26.9, 25.75],
}
a_ = F"""{src_lang}-{tgt_lang}"""
a_ = F"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"allenai/{model_name}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
"""
model_card_dir.mkdir(parents=lowercase__ ,exist_ok=lowercase__ )
a_ = os.path.join(lowercase__ ,"README.md" )
print(F"""Generating {path}""" )
with open(lowercase__ ,"w" ,encoding="utf-8" ) as f:
f.write(lowercase__ )
# make sure we are under the root of the project
a_ = Path(__file__).resolve().parent.parent.parent
a_ = repo_dir / 'model_cards'
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
a_ = model_cards_dir / 'allenai' / model_name
write_model_card(model_card_dir, src_lang='en', tgt_lang='de', model_name=model_name)
| 685 |
'''simple docstring'''
def __UpperCAmelCase (lowercase__ = 100 ) -> int:
'''simple docstring'''
a_ = n * (n + 1) * (2 * n + 1) / 6
a_ = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
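# Hedged cross-check of the closed-form identity above; `solution_bruteforce`
# is a helper introduced here for illustration only.
def solution_bruteforce(n: int = 100) -> int:
    numbers = range(1, n + 1)
    return sum(numbers) ** 2 - sum(i * i for i in numbers)
# For example, both agree that the answer for n=10 is 3025 - 385 = 2640.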
if __name__ == "__main__":
print(F'{solution() = }')
| 685 | 1 |
'''simple docstring'''
a_ = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []}
a_ = ['a', 'b', 'c', 'd', 'e']
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ) -> str:
'''simple docstring'''
a_ = start
# add current to visited
visited.append(lowercase__ )
a_ = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
a_ = topological_sort(lowercase__ ,lowercase__ ,lowercase__ )
# if all neighbors visited add current to sort
sort.append(lowercase__ )
# if all vertices haven't been visited select a new one to visit
if len(lowercase__ ) != len(lowercase__ ):
for vertice in vertices:
if vertice not in visited:
a_ = topological_sort(lowercase__ ,lowercase__ ,lowercase__ )
# return sort
return sort
if __name__ == "__main__":
a_ = topological_sort('a', [], [])
print(sort)
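    # Hedged check on the sample graph above: every vertex appears after all
    # of its outgoing neighbours, so the expected order is
    # ['c', 'd', 'e', 'b', 'a'].
    assert sort == ['c', 'd', 'e', 'b', 'a']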
| 685 |
'''simple docstring'''
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_UpperCAmelCase =(PNDMScheduler,)
_UpperCAmelCase =(('''num_inference_steps''', 50),)
def _lowerCAmelCase ( self: int , **a: Optional[int]) ->Any:
'''simple docstring'''
a_ = {
"num_train_timesteps": 10_00,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**a)
return config
def _lowerCAmelCase ( self: Any , a: Tuple=0 , **a: Any) ->Any:
'''simple docstring'''
a_ = dict(self.forward_default_kwargs)
a_ = kwargs.pop("num_inference_steps" , a)
a_ = self.dummy_sample
a_ = 0.1 * sample
a_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
a_ = self.get_scheduler_config(**a)
a_ = scheduler_class(**a)
scheduler.set_timesteps(a)
# copy over dummy past residuals
a_ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a)
a_ = scheduler_class.from_pretrained(a)
new_scheduler.set_timesteps(a)
# copy over dummy past residuals
a_ = dummy_past_residuals[:]
a_ = scheduler.step_prk(a , a , a , **a).prev_sample
a_ = new_scheduler.step_prk(a , a , a , **a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
a_ = scheduler.step_plms(a , a , a , **a).prev_sample
a_ = new_scheduler.step_plms(a , a , a , **a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def _lowerCAmelCase ( self: str) ->Any:
'''simple docstring'''
pass
def _lowerCAmelCase ( self: Union[str, Any] , a: str=0 , **a: Union[str, Any]) ->Tuple:
'''simple docstring'''
a_ = dict(self.forward_default_kwargs)
a_ = kwargs.pop("num_inference_steps" , a)
a_ = self.dummy_sample
a_ = 0.1 * sample
a_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
a_ = self.get_scheduler_config()
a_ = scheduler_class(**a)
scheduler.set_timesteps(a)
# copy over dummy past residuals (must be after setting timesteps)
a_ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a)
a_ = scheduler_class.from_pretrained(a)
# copy over dummy past residuals
new_scheduler.set_timesteps(a)
# copy over dummy past residual (must be after setting timesteps)
a_ = dummy_past_residuals[:]
a_ = scheduler.step_prk(a , a , a , **a).prev_sample
a_ = new_scheduler.step_prk(a , a , a , **a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
a_ = scheduler.step_plms(a , a , a , **a).prev_sample
a_ = new_scheduler.step_plms(a , a , a , **a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def _lowerCAmelCase ( self: Dict , **a: int) ->Any:
'''simple docstring'''
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config(**a)
a_ = scheduler_class(**a)
a_ = 10
a_ = self.dummy_model()
a_ = self.dummy_sample_deter
scheduler.set_timesteps(a)
for i, t in enumerate(scheduler.prk_timesteps):
a_ = model(a , a)
a_ = scheduler.step_prk(a , a , a).prev_sample
for i, t in enumerate(scheduler.plms_timesteps):
a_ = model(a , a)
a_ = scheduler.step_plms(a , a , a).prev_sample
return sample
def _lowerCAmelCase ( self: int) ->int:
'''simple docstring'''
a_ = dict(self.forward_default_kwargs)
a_ = kwargs.pop("num_inference_steps" , a)
for scheduler_class in self.scheduler_classes:
a_ = self.get_scheduler_config()
a_ = scheduler_class(**a)
a_ = self.dummy_sample
a_ = 0.1 * sample
if num_inference_steps is not None and hasattr(a , "set_timesteps"):
scheduler.set_timesteps(a)
elif num_inference_steps is not None and not hasattr(a , "set_timesteps"):
a_ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
a_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
a_ = dummy_past_residuals[:]
a_ = scheduler.step_prk(a , 0 , a , **a).prev_sample
a_ = scheduler.step_prk(a , 1 , a , **a).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
a_ = scheduler.step_plms(a , 0 , a , **a).prev_sample
a_ = scheduler.step_plms(a , 1 , a , **a).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
def _lowerCAmelCase ( self: Dict) ->List[Any]:
'''simple docstring'''
for timesteps in [1_00, 10_00]:
self.check_over_configs(num_train_timesteps=a)
def _lowerCAmelCase ( self: Optional[int]) ->List[Any]:
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=a)
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config(steps_offset=1)
a_ = scheduler_class(**a)
scheduler.set_timesteps(10)
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[9_01, 8_51, 8_51, 8_01, 8_01, 7_51, 7_51, 7_01, 7_01, 6_51, 6_51, 6_01, 6_01, 5_01, 4_01, 3_01, 2_01, 1_01, 1]) , )
def _lowerCAmelCase ( self: Tuple) ->Optional[Any]:
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02]):
self.check_over_configs(beta_start=a , beta_end=a)
def _lowerCAmelCase ( self: int) ->Tuple:
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=a)
def _lowerCAmelCase ( self: Optional[int]) ->List[Any]:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=a)
def _lowerCAmelCase ( self: Tuple) ->Optional[Any]:
'''simple docstring'''
for t in [1, 5, 10]:
self.check_over_forward(time_step=a)
def _lowerCAmelCase ( self: str) ->List[str]:
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00]):
self.check_over_forward(num_inference_steps=a)
def _lowerCAmelCase ( self: Dict) ->Union[str, Any]:
'''simple docstring'''
a_ = 27
for scheduler_class in self.scheduler_classes:
a_ = self.dummy_sample
a_ = 0.1 * sample
a_ = self.get_scheduler_config()
a_ = scheduler_class(**a)
scheduler.set_timesteps(a)
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2]):
a_ = scheduler.step_prk(a , a , a).prev_sample
def _lowerCAmelCase ( self: Optional[Any]) ->Dict:
'''simple docstring'''
with self.assertRaises(a):
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config()
a_ = scheduler_class(**a)
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample).prev_sample
def _lowerCAmelCase ( self: Optional[int]) ->Union[str, Any]:
'''simple docstring'''
a_ = self.full_loop()
a_ = torch.sum(torch.abs(a))
a_ = torch.mean(torch.abs(a))
assert abs(result_sum.item() - 198.1318) < 1e-2
assert abs(result_mean.item() - 0.2580) < 1e-3
def _lowerCAmelCase ( self: Optional[int]) ->int:
'''simple docstring'''
a_ = self.full_loop(prediction_type="v_prediction")
a_ = torch.sum(torch.abs(a))
a_ = torch.mean(torch.abs(a))
assert abs(result_sum.item() - 67.3986) < 1e-2
assert abs(result_mean.item() - 0.0878) < 1e-3
def _lowerCAmelCase ( self: int) ->Optional[Any]:
'''simple docstring'''
a_ = self.full_loop(set_alpha_to_one=a , beta_start=0.01)
a_ = torch.sum(torch.abs(a))
a_ = torch.mean(torch.abs(a))
assert abs(result_sum.item() - 230.0399) < 1e-2
assert abs(result_mean.item() - 0.2995) < 1e-3
def _lowerCAmelCase ( self: List[str]) ->Any:
'''simple docstring'''
a_ = self.full_loop(set_alpha_to_one=a , beta_start=0.01)
a_ = torch.sum(torch.abs(a))
a_ = torch.mean(torch.abs(a))
assert abs(result_sum.item() - 186.9482) < 1e-2
assert abs(result_mean.item() - 0.2434) < 1e-3
| 685 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'andreasmadsen/efficient_mlm_m0.40': (
'https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json'
),
}
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_UpperCAmelCase ='''roberta-prelayernorm'''
def __init__( self: Tuple , a: Tuple=5_02_65 , a: List[Any]=7_68 , a: Tuple=12 , a: Dict=12 , a: Optional[int]=30_72 , a: Any="gelu" , a: int=0.1 , a: Optional[int]=0.1 , a: Optional[int]=5_12 , a: Tuple=2 , a: Any=0.02 , a: str=1e-12 , a: List[str]=1 , a: int=0 , a: Union[str, Any]=2 , a: Tuple="absolute" , a: Any=True , a: Any=None , **a: Dict , ) ->Optional[int]:
'''simple docstring'''
super().__init__(pad_token_id=a , bos_token_id=a , eos_token_id=a , **a)
a_ = vocab_size
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = hidden_act
a_ = intermediate_size
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = type_vocab_size
a_ = initializer_range
a_ = layer_norm_eps
a_ = position_embedding_type
a_ = use_cache
a_ = classifier_dropout
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
@property
def _lowerCAmelCase ( self: Dict) ->Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
a_ = {0: "batch", 1: "choice", 2: "sequence"}
else:
a_ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
])
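# Hedged usage sketch (upstream this configuration class is named
# RobertaPreLayerNormConfig; the name here is obfuscated):
#
#     cfg = RobertaPreLayerNormConfig(num_hidden_layers=6, hidden_size=384)
#     cfg.model_type                       # "roberta-prelayernorm"
#     cfg.save_pretrained("./my-config")   # round-trips via from_pretrained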
| 685 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def _lowerCAmelCase ( self: Optional[int]) ->Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
def _lowerCAmelCase ( self: str) ->Optional[int]:
'''simple docstring'''
a_ , a_ = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-canny" , from_pt=a , dtype=jnp.bfloataa)
a_ , a_ = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , controlnet=a , from_pt=a , dtype=jnp.bfloataa)
a_ = controlnet_params
a_ = "bird"
a_ = jax.device_count()
a_ = pipe.prepare_text_inputs([prompts] * num_samples)
a_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png")
a_ = pipe.prepare_image_inputs([canny_image] * num_samples)
a_ = jax.random.PRNGKey(0)
a_ = jax.random.split(a , jax.device_count())
a_ = replicate(a)
a_ = shard(a)
a_ = shard(a)
a_ = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=50 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
a_ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
a_ = images[0, 2_53:2_56, 2_53:2_56, -1]
a_ = jnp.asarray(jax.device_get(image_slice.flatten()))
a_ = jnp.array(
[0.16_7969, 0.11_6699, 0.08_1543, 0.15_4297, 0.13_2812, 0.10_8887, 0.16_9922, 0.16_9922, 0.20_5078])
print(f"""output_slice: {output_slice}""")
assert jnp.abs(output_slice - expected_slice).max() < 1e-2
def _lowerCAmelCase ( self: Union[str, Any]) ->str:
'''simple docstring'''
a_ , a_ = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-openpose" , from_pt=a , dtype=jnp.bfloataa)
a_ , a_ = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , controlnet=a , from_pt=a , dtype=jnp.bfloataa)
a_ = controlnet_params
a_ = "Chef in the kitchen"
a_ = jax.device_count()
a_ = pipe.prepare_text_inputs([prompts] * num_samples)
a_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png")
a_ = pipe.prepare_image_inputs([pose_image] * num_samples)
a_ = jax.random.PRNGKey(0)
a_ = jax.random.split(a , jax.device_count())
a_ = replicate(a)
a_ = shard(a)
a_ = shard(a)
a_ = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=50 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
a_ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
a_ = images[0, 2_53:2_56, 2_53:2_56, -1]
a_ = jnp.asarray(jax.device_get(image_slice.flatten()))
a_ = jnp.array(
[[0.27_1484, 0.26_1719, 0.27_5391, 0.27_7344, 0.27_9297, 0.29_1016, 0.29_4922, 0.30_2734, 0.30_2734]])
print(f"""output_slice: {output_slice}""")
assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 685 | 1 |
'''simple docstring'''
a_ = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
a_ = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ) -> list[int]:
'''simple docstring'''
a_ = True
a_ = []
for neighbour in graph[vert]:
if not visited[neighbour]:
order += topology_sort(lowercase__ ,lowercase__ ,lowercase__ )
order.append(lowercase__ )
return order
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ) -> list[int]:
'''simple docstring'''
a_ = True
a_ = [vert]
for neighbour in reversed_graph[vert]:
if not visited[neighbour]:
component += find_components(lowercase__ ,lowercase__ ,lowercase__ )
return component
def __UpperCAmelCase (lowercase__ ) -> list[list[int]]:
'''simple docstring'''
a_ = len(lowercase__ ) * [False]
a_ = {vert: [] for vert in range(len(lowercase__ ) )}
for vert, neighbours in graph.items():
for neighbour in neighbours:
reversed_graph[neighbour].append(lowercase__ )
a_ = []
for i, was_visited in enumerate(lowercase__ ):
if not was_visited:
order += topology_sort(lowercase__ ,lowercase__ ,lowercase__ )
a_ = []
a_ = len(lowercase__ ) * [False]
for i in range(len(lowercase__ ) ):
a_ = order[len(lowercase__ ) - i - 1]
if not visited[vert]:
a_ = find_components(lowercase__ ,lowercase__ ,lowercase__ )
components_list.append(lowercase__ )
return components_list
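# Hedged usage sketch on the two sample digraphs bound at the top of this
# file (the driver defined last above is upstream named
# strongly_connected_components):
#
#     strongly_connected_components({0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []})
#     # -> [[0, 1, 2], [3], [4]]
#     strongly_connected_components({0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]})
#     # -> [[0, 2, 1], [3, 5, 4]]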
| 685 |
'''simple docstring'''
def __UpperCAmelCase (lowercase__ = 1000 ) -> int:
'''simple docstring'''
return sum(e for e in range(3 ,lowercase__ ) if e % 3 == 0 or e % 5 == 0 )
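# Hedged sanity check: for the classic Project Euler example, the multiples
# of 3 or 5 below 10 are 3, 5, 6 and 9, so solution(10) == 23.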
if __name__ == "__main__":
print(F'{solution() = }')
| 685 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
a_ = {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/config.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/config.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/config.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/config.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/config.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/config.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json',
}
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_UpperCAmelCase ='''albert'''
def __init__( self: Dict , a: List[str]=3_00_00 , a: Union[str, Any]=1_28 , a: Union[str, Any]=40_96 , a: List[Any]=12 , a: Optional[int]=1 , a: Tuple=64 , a: Any=1_63_84 , a: Union[str, Any]=1 , a: int="gelu_new" , a: Dict=0 , a: Dict=0 , a: Tuple=5_12 , a: int=2 , a: int=0.02 , a: str=1e-12 , a: Any=0.1 , a: int="absolute" , a: Tuple=0 , a: List[Any]=2 , a: Tuple=3 , **a: int , ) ->Union[str, Any]:
'''simple docstring'''
super().__init__(pad_token_id=a , bos_token_id=a , eos_token_id=a , **a)
a_ = vocab_size
a_ = embedding_size
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_hidden_groups
a_ = num_attention_heads
a_ = inner_group_num
a_ = hidden_act
a_ = intermediate_size
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = type_vocab_size
a_ = initializer_range
a_ = layer_norm_eps
a_ = classifier_dropout_prob
a_ = position_embedding_type
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
@property
def _lowerCAmelCase ( self: str) ->Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
a_ = {0: "batch", 1: "choice", 2: "sequence"}
else:
a_ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
])
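# Hedged usage sketch: for the default task, the `inputs` property above
# resolves to three tensors with dynamic batch/sequence axes:
#
#     OrderedDict([
#         ("input_ids", {0: "batch", 1: "sequence"}),
#         ("attention_mask", {0: "batch", 1: "sequence"}),
#         ("token_type_ids", {0: "batch", 1: "sequence"}),
#     ])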
| 685 |
'''simple docstring'''
import math
def __UpperCAmelCase (lowercase__ ) -> list:
'''simple docstring'''
a_ = [True] * n
a_ = False
a_ = False
a_ = True
for i in range(3 ,int(n**0.5 + 1 ) ,2 ):
a_ = i * 2
while index < n:
a_ = False
a_ = index + i
a_ = [2]
for i in range(3 ,lowercase__ ,2 ):
if is_prime[i]:
primes.append(lowercase__ )
return primes
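# Hedged sanity check for the odd-only sieve above (2 is hardcoded and only
# odd candidates are scanned):
#
#     prime_sieve(20)  # -> [2, 3, 5, 7, 11, 13, 17, 19]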
def __UpperCAmelCase (lowercase__ = 999966663333 ) -> int:
'''simple docstring'''
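    # Project Euler 234 (semidivisible numbers): for consecutive primes
    # p < q, every n strictly between p**2 and q**2 has lps(n) = p and
    # ups(n) = q, so we sum the n in that window (capped at `limit`) that are
    # divisible by exactly one of p and q, using inclusion-exclusion below.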
a_ = math.floor(math.sqrt(lowercase__ ) ) + 100
a_ = prime_sieve(lowercase__ )
a_ = 0
a_ = 0
a_ = primes[prime_index]
while (last_prime**2) <= limit:
a_ = primes[prime_index + 1]
a_ = last_prime**2
a_ = next_prime**2
# Get numbers divisible by lps(current)
a_ = lower_bound + last_prime
while upper_bound > current <= limit:
matches_sum += current
current += last_prime
# Reset the upper_bound
while (upper_bound - next_prime) > limit:
upper_bound -= next_prime
# Add the numbers divisible by ups(current)
a_ = upper_bound - next_prime
while current > lower_bound:
matches_sum += current
current -= next_prime
# Remove the numbers divisible by both ups and lps
a_ = 0
while upper_bound > current <= limit:
if current <= lower_bound:
# Increment the current number
current += last_prime * next_prime
continue
if current > limit:
break
# Remove twice since it was added by both ups and lps
matches_sum -= current * 2
# Increment the current number
current += last_prime * next_prime
# Setup for next pair
a_ = next_prime
prime_index += 1
return matches_sum
if __name__ == "__main__":
print(solution())
| 685 | 1 |
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
a_ = logging.getLogger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_UpperCAmelCase =field(
default=0.0 , metadata={'''help''': '''The label smoothing epsilon to apply (if not zero).'''} )
_UpperCAmelCase =field(default=lowercase_ , metadata={'''help''': '''Whether to SortishSamler or not.'''} )
_UpperCAmelCase =field(
default=lowercase_ , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} )
_UpperCAmelCase =field(default=lowercase_ , metadata={'''help''': '''whether to use adafactor'''} )
_UpperCAmelCase =field(
default=lowercase_ , metadata={'''help''': '''Encoder layer dropout probability. Goes into model.config.'''} )
_UpperCAmelCase =field(
default=lowercase_ , metadata={'''help''': '''Decoder layer dropout probability. Goes into model.config.'''} )
_UpperCAmelCase =field(default=lowercase_ , metadata={'''help''': '''Dropout probability. Goes into model.config.'''} )
_UpperCAmelCase =field(
default=lowercase_ , metadata={'''help''': '''Attention dropout probability. Goes into model.config.'''} )
_UpperCAmelCase =field(
default='''linear''' , metadata={'''help''': F'''Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}'''} , )
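# Hedged usage sketch: like any TrainingArguments subclass this is normally
# filled from the command line with HfArgumentParser (the class name here is
# obfuscated; upstream it is Seq2SeqTrainingArguments):
#
#     parser = HfArgumentParser(Seq2SeqTrainingArguments)
#     (training_args,) = parser.parse_args_into_dataclasses()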
| 685 |
'''simple docstring'''
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ) -> Any:
'''simple docstring'''
a_ = UniSpeechSatForSequenceClassification.from_pretrained(lowercase__ ,config=lowercase__ )
a_ = downstream_dict["projector.weight"]
a_ = downstream_dict["projector.bias"]
a_ = downstream_dict["model.post_net.linear.weight"]
a_ = downstream_dict["model.post_net.linear.bias"]
return model
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ) -> Dict:
'''simple docstring'''
a_ = UniSpeechSatForAudioFrameClassification.from_pretrained(lowercase__ ,config=lowercase__ )
a_ = downstream_dict["model.linear.weight"]
a_ = downstream_dict["model.linear.bias"]
return model
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ) -> Optional[Any]:
'''simple docstring'''
a_ = UniSpeechSatForXVector.from_pretrained(lowercase__ ,config=lowercase__ )
a_ = downstream_dict["connector.weight"]
a_ = downstream_dict["connector.bias"]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
a_ = downstream_dict[
F"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
]
a_ = downstream_dict[F"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]
a_ = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
a_ = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
a_ = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
a_ = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
a_ = downstream_dict["objective.W"]
return model
@torch.no_grad()
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ) -> List[str]:
'''simple docstring'''
a_ = torch.load(lowercase__ ,map_location="cpu" )
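    # s3prl checkpoints keep the task-head weights under "Downstream" and the
    # layer-weighted-sum weights under "Featurizer" (read further below).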
a_ = checkpoint["Downstream"]
a_ = UniSpeechSatConfig.from_pretrained(lowercase__ )
a_ = WavaVecaFeatureExtractor.from_pretrained(
lowercase__ ,return_attention_mask=lowercase__ ,do_normalize=lowercase__ )
a_ = hf_config.architectures[0]
if arch.endswith("ForSequenceClassification" ):
a_ = convert_classification(lowercase__ ,lowercase__ ,lowercase__ )
elif arch.endswith("ForAudioFrameClassification" ):
a_ = convert_diarization(lowercase__ ,lowercase__ ,lowercase__ )
elif arch.endswith("ForXVector" ):
a_ = convert_xvector(lowercase__ ,lowercase__ ,lowercase__ )
else:
raise NotImplementedError(F"""S3PRL weights conversion is not supported for {arch}""" )
if hf_config.use_weighted_layer_sum:
a_ = checkpoint["Featurizer"]["weights"]
hf_feature_extractor.save_pretrained(lowercase__ )
hf_model.save_pretrained(lowercase__ )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
a_ = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 685 | 1 |
'''simple docstring'''
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_UpperCAmelCase =(PNDMScheduler,)
_UpperCAmelCase =(('''num_inference_steps''', 50),)
def _lowerCAmelCase ( self: int , **a: Optional[int]) ->Any:
'''simple docstring'''
a_ = {
"num_train_timesteps": 10_00,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**a)
return config
def _lowerCAmelCase ( self: Any , a: Tuple=0 , **a: Any) ->Any:
'''simple docstring'''
a_ = dict(self.forward_default_kwargs)
a_ = kwargs.pop("num_inference_steps" , a)
a_ = self.dummy_sample
a_ = 0.1 * sample
a_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
a_ = self.get_scheduler_config(**a)
a_ = scheduler_class(**a)
scheduler.set_timesteps(a)
# copy over dummy past residuals
a_ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a)
a_ = scheduler_class.from_pretrained(a)
new_scheduler.set_timesteps(a)
# copy over dummy past residuals
a_ = dummy_past_residuals[:]
a_ = scheduler.step_prk(a , a , a , **a).prev_sample
a_ = new_scheduler.step_prk(a , a , a , **a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
a_ = scheduler.step_plms(a , a , a , **a).prev_sample
a_ = new_scheduler.step_plms(a , a , a , **a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def _lowerCAmelCase ( self: str) ->Any:
'''simple docstring'''
pass
def _lowerCAmelCase ( self: Union[str, Any] , a: str=0 , **a: Union[str, Any]) ->Tuple:
'''simple docstring'''
a_ = dict(self.forward_default_kwargs)
a_ = kwargs.pop("num_inference_steps" , a)
a_ = self.dummy_sample
a_ = 0.1 * sample
a_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
a_ = self.get_scheduler_config()
a_ = scheduler_class(**a)
scheduler.set_timesteps(a)
# copy over dummy past residuals (must be after setting timesteps)
a_ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a)
a_ = scheduler_class.from_pretrained(a)
# copy over dummy past residuals
new_scheduler.set_timesteps(a)
# copy over dummy past residual (must be after setting timesteps)
a_ = dummy_past_residuals[:]
a_ = scheduler.step_prk(a , a , a , **a).prev_sample
a_ = new_scheduler.step_prk(a , a , a , **a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
a_ = scheduler.step_plms(a , a , a , **a).prev_sample
a_ = new_scheduler.step_plms(a , a , a , **a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def _lowerCAmelCase ( self: Dict , **a: int) ->Any:
'''simple docstring'''
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config(**a)
a_ = scheduler_class(**a)
a_ = 10
a_ = self.dummy_model()
a_ = self.dummy_sample_deter
scheduler.set_timesteps(a)
for i, t in enumerate(scheduler.prk_timesteps):
a_ = model(a , a)
a_ = scheduler.step_prk(a , a , a).prev_sample
for i, t in enumerate(scheduler.plms_timesteps):
a_ = model(a , a)
a_ = scheduler.step_plms(a , a , a).prev_sample
return sample
def _lowerCAmelCase ( self: int) ->int:
'''simple docstring'''
a_ = dict(self.forward_default_kwargs)
a_ = kwargs.pop("num_inference_steps" , a)
for scheduler_class in self.scheduler_classes:
a_ = self.get_scheduler_config()
a_ = scheduler_class(**a)
a_ = self.dummy_sample
a_ = 0.1 * sample
if num_inference_steps is not None and hasattr(a , "set_timesteps"):
scheduler.set_timesteps(a)
elif num_inference_steps is not None and not hasattr(a , "set_timesteps"):
a_ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
a_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
a_ = dummy_past_residuals[:]
a_ = scheduler.step_prk(a , 0 , a , **a).prev_sample
a_ = scheduler.step_prk(a , 1 , a , **a).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
a_ = scheduler.step_plms(a , 0 , a , **a).prev_sample
a_ = scheduler.step_plms(a , 1 , a , **a).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
def _lowerCAmelCase ( self: Dict) ->List[Any]:
'''simple docstring'''
for timesteps in [1_00, 10_00]:
self.check_over_configs(num_train_timesteps=a)
def _lowerCAmelCase ( self: Optional[int]) ->List[Any]:
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=a)
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config(steps_offset=1)
a_ = scheduler_class(**a)
scheduler.set_timesteps(10)
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[9_01, 8_51, 8_51, 8_01, 8_01, 7_51, 7_51, 7_01, 7_01, 6_51, 6_51, 6_01, 6_01, 5_01, 4_01, 3_01, 2_01, 1_01, 1]) , )
def _lowerCAmelCase ( self: Tuple) ->Optional[Any]:
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02]):
self.check_over_configs(beta_start=a , beta_end=a)
def _lowerCAmelCase ( self: int) ->Tuple:
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=a)
def _lowerCAmelCase ( self: Optional[int]) ->List[Any]:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=a)
def _lowerCAmelCase ( self: Tuple) ->Optional[Any]:
'''simple docstring'''
for t in [1, 5, 10]:
self.check_over_forward(time_step=a)
def _lowerCAmelCase ( self: str) ->List[str]:
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00]):
self.check_over_forward(num_inference_steps=a)
def _lowerCAmelCase ( self: Dict) ->Union[str, Any]:
'''simple docstring'''
a_ = 27
for scheduler_class in self.scheduler_classes:
a_ = self.dummy_sample
a_ = 0.1 * sample
a_ = self.get_scheduler_config()
a_ = scheduler_class(**a)
scheduler.set_timesteps(a)
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2]):
a_ = scheduler.step_prk(a , a , a).prev_sample
def _lowerCAmelCase ( self: Optional[Any]) ->Dict:
'''simple docstring'''
with self.assertRaises(a):
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config()
a_ = scheduler_class(**a)
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample).prev_sample
def _lowerCAmelCase ( self: Optional[int]) ->Union[str, Any]:
'''simple docstring'''
a_ = self.full_loop()
a_ = torch.sum(torch.abs(a))
a_ = torch.mean(torch.abs(a))
assert abs(result_sum.item() - 198.1318) < 1e-2
assert abs(result_mean.item() - 0.2580) < 1e-3
def _lowerCAmelCase ( self: Optional[int]) ->int:
'''simple docstring'''
a_ = self.full_loop(prediction_type="v_prediction")
a_ = torch.sum(torch.abs(a))
a_ = torch.mean(torch.abs(a))
assert abs(result_sum.item() - 67.3986) < 1e-2
assert abs(result_mean.item() - 0.0878) < 1e-3
def _lowerCAmelCase ( self: int) ->Optional[Any]:
'''simple docstring'''
a_ = self.full_loop(set_alpha_to_one=a , beta_start=0.01)
a_ = torch.sum(torch.abs(a))
a_ = torch.mean(torch.abs(a))
assert abs(result_sum.item() - 230.0399) < 1e-2
assert abs(result_mean.item() - 0.2995) < 1e-3
def _lowerCAmelCase ( self: List[str]) ->Any:
'''simple docstring'''
a_ = self.full_loop(set_alpha_to_one=a , beta_start=0.01)
a_ = torch.sum(torch.abs(a))
a_ = torch.mean(torch.abs(a))
assert abs(result_sum.item() - 186.9482) < 1e-2
assert abs(result_mean.item() - 0.2434) < 1e-3
| 685 |
'''simple docstring'''
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
    from .unet_ad import UNetaDModel
    from .unet_ad_condition import UNetaDConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL
| 685 | 1 |
'''simple docstring'''
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs('hub/hopper-medium-v2/unet/hor32', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/unet/hor128', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/value_function', exist_ok=True)
def __UpperCAmelCase (lowercase__ ) -> List[str]:
'''simple docstring'''
if hor == 128:
a_ = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
a_ = (32, 128, 256)
a_ = ("UpResnetBlock1D", "UpResnetBlock1D")
elif hor == 32:
a_ = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
a_ = (32, 64, 128, 256)
a_ = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
a_ = torch.load(F"""/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch""" )
a_ = model.state_dict()
a_ = {
"down_block_types": down_block_types,
"block_out_channels": block_out_channels,
"up_block_types": up_block_types,
"layers_per_block": 1,
"use_timestep_embedding": True,
"out_block_type": "OutConv1DBlock",
"norm_num_groups": 8,
"downsample_each_block": False,
"in_channels": 14,
"out_channels": 14,
"extra_in_channels": 0,
"time_embedding_type": "positional",
"flip_sin_to_cos": False,
"freq_shift": 1,
"sample_size": 65536,
"mid_block_type": "MidResTemporalBlock1D",
"act_fn": "mish",
}
a_ = UNetaDModel(**lowercase__ )
print(F"""length of state dict: {len(state_dict.keys() )}""" )
print(F"""length of value function dict: {len(hf_value_function.state_dict().keys() )}""" )
a_ = dict(zip(model.state_dict().keys() ,hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
a_ = state_dict.pop(lowercase__ )
hf_value_function.load_state_dict(lowercase__ )
torch.save(hf_value_function.state_dict() ,F"""hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin""" )
with open(F"""hub/hopper-medium-v2/unet/hor{hor}/config.json""" ,"w" ) as f:
json.dump(lowercase__ ,lowercase__ )
def __UpperCAmelCase () -> int:
'''simple docstring'''
a_ = {
"in_channels": 14,
"down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
"up_block_types": (),
"out_block_type": "ValueFunction",
"mid_block_type": "ValueFunctionMidBlock1D",
"block_out_channels": (32, 64, 128, 256),
"layers_per_block": 1,
"downsample_each_block": True,
"sample_size": 65536,
"out_channels": 14,
"extra_in_channels": 0,
"time_embedding_type": "positional",
"use_timestep_embedding": True,
"flip_sin_to_cos": False,
"freq_shift": 1,
"norm_num_groups": 8,
"act_fn": "mish",
}
a_ = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch" )
a_ = model
a_ = UNetaDModel(**lowercase__ )
print(F"""length of state dict: {len(state_dict.keys() )}""" )
print(F"""length of value function dict: {len(hf_value_function.state_dict().keys() )}""" )
a_ = dict(zip(state_dict.keys() ,hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
a_ = state_dict.pop(lowercase__ )
hf_value_function.load_state_dict(lowercase__ )
torch.save(hf_value_function.state_dict() ,"hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin" )
with open("hub/hopper-medium-v2/value_function/config.json" ,"w" ) as f:
json.dump(lowercase__ ,lowercase__ )
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
| 685 |
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
a_ = logging.get_logger(__name__)
a_ = {'vocab_file': 'vocab.txt', 'emoji_file': 'emoji.json'}
a_ = {
'vocab_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt',
},
'emoji_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json',
},
}
a_ = {
'abeja/gpt-neox-japanese-2.7b': 2_048,
}
def __UpperCAmelCase (lowercase__ ,lowercase__ ) -> Tuple:
'''simple docstring'''
with open(lowercase__ ,"r" ,encoding="utf-8" ) as f:
a_ = json.loads(f.read() )
a_ = collections.OrderedDict()
a_ = collections.OrderedDict()
a_ = collections.OrderedDict()
with open(lowercase__ ,"r" ,encoding="utf-8" ) as f:
a_ = f.readlines()
a_ = [[t.rstrip("\n" )] if (t == "," or "," not in t) else t.rstrip("\n" ).split("," ) for t in token]
for idx, b in enumerate(lowercase__ ):
a_ = b
a_ = idx
for wd in b:
a_ = idx
return vocab, raw_vocab, ids_to_tokens, emoji
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_UpperCAmelCase =VOCAB_FILES_NAMES
_UpperCAmelCase =PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase =['''input_ids''', '''attention_mask''']
def __init__( self: List[str] , a: Union[str, Any] , a: Optional[int] , a: List[str]="<|endoftext|>" , a: Union[str, Any]="<|endoftext|>" , a: Dict="<|startoftext|>" , a: Dict="<|endoftext|>" , a: Union[str, Any]=False , **a: Optional[int] , ) ->str:
'''simple docstring'''
super().__init__(
unk_token=a , pad_token=a , bos_token=a , eos_token=a , do_clean_text=a , **a , )
if not os.path.isfile(a):
raise ValueError(
f"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
" model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
if not os.path.isfile(a):
raise ValueError(
f"""Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
" pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
a_ = do_clean_text
a_ , a_ , a_ , a_ = load_vocab_and_emoji(a , a)
a_ = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji)
@property
def _lowerCAmelCase ( self: Optional[Any]) ->Optional[Any]:
'''simple docstring'''
return len(self.raw_vocab)
def _lowerCAmelCase ( self: Dict) ->Any:
'''simple docstring'''
return dict(self.raw_vocab , **self.added_tokens_encoder)
def _lowerCAmelCase ( self: Union[str, Any] , a: Any) ->Dict:
'''simple docstring'''
return self.subword_tokenizer.tokenize(a , clean=self.do_clean_text)
def _lowerCAmelCase ( self: int , a: List[Any]) ->Union[str, Any]:
'''simple docstring'''
return self.vocab.get(a , self.vocab.get(self.unk_token))
def _lowerCAmelCase ( self: Optional[Any] , a: Optional[int]) ->str:
'''simple docstring'''
return self.subword_tokenizer.convert_id_to_token(a)
def _lowerCAmelCase ( self: Optional[int] , a: Any) ->str:
'''simple docstring'''
a_ = "".join(a).strip()
return out_string
def _lowerCAmelCase ( self: Any , a: "Conversation") ->List[int]:
'''simple docstring'''
a_ = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(a , add_special_tokens=a) + [self.eos_token_id])
if len(a) > self.model_max_length:
a_ = input_ids[-self.model_max_length :]
return input_ids
def _lowerCAmelCase ( self: int , a: str , a: Optional[str] = None) ->Tuple[str]:
'''simple docstring'''
a_ = 0
if os.path.isdir(a):
a_ = os.path.join(
a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
a_ = os.path.join(
a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"])
else:
a_ = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
)
a_ = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
)
with open(a , "w" , encoding="utf-8") as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
" Please check that the vocabulary is not corrupted!")
a_ = token_index
writer.write(",".join(a) + "\n")
index += 1
with open(a , "w" , encoding="utf-8") as writer:
json.dump(self.emoji , a)
return vocab_file, emoji_file
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
def __init__( self: List[str] , a: Any , a: Union[str, Any] , a: Any) ->List[Any]:
'''simple docstring'''
a_ = vocab # same as swe
a_ = ids_to_tokens # same as bpe
a_ = emoji
a_ = np.max([len(a) for w in self.vocab.keys()])
a_ = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
a_ = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
a_ = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
a_ = re.compile(
r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
a_ = re.compile(
r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
a_ = re.compile(
r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*")
a_ = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
a_ = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
a_ = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})
def __len__( self: Dict) ->Any:
'''simple docstring'''
return len(self.ids_to_tokens)
def _lowerCAmelCase ( self: Union[str, Any] , a: Tuple) ->Any:
'''simple docstring'''
a_ = self.content_repattera.sub("<URL>" , a)
a_ = self.content_repattera.sub("<EMAIL>" , a)
a_ = self.content_repattera.sub("<TEL>" , a)
a_ = self.content_repattera.sub("<DATE>" , a)
a_ = self.content_repattera.sub("<DATE>" , a)
a_ = self.content_repattera.sub("<PRICE>" , a)
a_ = content.translate(self.content_transa)
while "<BLOCK><BLOCK>" in content:
a_ = content.replace("<BLOCK><BLOCK>" , "<BLOCK>")
return content
def _lowerCAmelCase ( self: Any , a: int , a: Optional[int]=False) ->List[str]:
'''simple docstring'''
a_ = text.replace(" " , "<SP>")
a_ = text.replace(" " , "<SP>")
a_ = text.replace("\r\n" , "<BR>")
a_ = text.replace("\n" , "<BR>")
a_ = text.replace("\r" , "<BR>")
a_ = text.replace("\t" , "<TAB>")
a_ = text.replace("—" , "ー")
a_ = text.replace("−" , "ー")
for k, v in self.emoji["emoji"].items():
if k in text:
a_ = text.replace(a , a)
if clean:
a_ = self.clean_text(a)
def check_simbol(a: Dict):
a_ = x.encode()
            if len(a) == 1 and len(e) == 2:
a_ = (int(e[0]) << 8) + int(e[1])
if (
(c >= 0XC_2_A_1 and c <= 0XC_2_B_F)
or (c >= 0XC_7_8_0 and c <= 0XC_7_8_3)
or (c >= 0XC_A_B_9 and c <= 0XC_B_B_F)
or (c >= 0XC_C_8_0 and c <= 0XC_D_A_2)
):
return True
return False
def checkuae(a: str):
a_ = x.encode()
            if len(a) == 1 and len(e) == 3:
a_ = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
if c >= 0XE_2_8_0_8_0 and c <= 0XE_2_B_0_7_F:
return True
return False
a_ = 0
a_ = []
        while pos < len(text):
            a_ = min(len(text) , pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
a_ = [] # (token_id, token, pos)
for e in range(a , a , -1):
a_ = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(a) > 2:
a_ = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                a_ , a_ , a_ = sorted(candidates , key=lambda x: x[0])[0]
                result.append(wd)
                a_ = e
else:
a_ = pos + 1
a_ = text[pos:end]
if check_simbol(a):
result.append("<KIGOU>")
elif checkuae(a):
result.append("<U2000U2BFF>")
else:
for i in wd.encode("utf-8"):
result.append("<|byte%d|>" % i)
a_ = end
return result
def _lowerCAmelCase ( self: int , a: List[Any] , a: Any="\n") ->str:
'''simple docstring'''
        a_ = []
        a_ = []
        for index in a:
            a_ = self.ids_to_tokens[index][0]
            if word[:6] == "<|byte" and word[-2:] == "|>":
                byte_tokens.append(int(word[6:-2]))
            else:
                if len(byte_tokens) > 0:
                    words.append(bytearray(byte_tokens).decode("utf-8" , errors="replace"))
                    a_ = []
                if word[:7] == "<|emoji" and word[-2:] == "|>":
                    words.append(self.emoji["emoji_inv"][word])
                elif word == "<SP>":
                    words.append(" ")
                elif word == "<BR>":
                    words.append(a)
                elif word == "<TAB>":
                    words.append("\t")
                elif word == "<BLOCK>":
                    words.append("▀")
                elif word == "<KIGOU>":
                    words.append("ǀ")
                elif word == "<U2000U2BFF>":
                    words.append("‖")
                else:
                    words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode("utf-8" , errors="replace"))
        a_ = "".join(words)
        return text
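# Hedged round-trip sketch (added for illustration; the names oov_char and _byte_tokens are
# not from the original module): the "<|byte%d|>" fallback above encodes out-of-vocabulary
# characters as UTF-8 byte tokens, and the decoder recovers them through bytearray.
oov_char = "鸚"
_byte_tokens = ["<|byte%d|>" % b for b in oov_char.encode("utf-8")]
assert bytearray(int(t[6:-2]) for t in _byte_tokens).decode("utf-8", errors="replace") == oov_char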
| 685 | 1 |
'''simple docstring'''
from typing import Any
import numpy as np
def __UpperCAmelCase (lowercase__ ) -> bool:
'''simple docstring'''
    return np.array_equal(lowercase__ ,lowercase__.conjugate().T )
def __UpperCAmelCase (lowercase__ ,lowercase__ ) -> Any:
'''simple docstring'''
a_ = v.conjugate().T
a_ = v_star.dot(lowercase__ )
assert isinstance(lowercase__ ,np.ndarray )
return (v_star_dot.dot(lowercase__ )) / (v_star.dot(lowercase__ ))
def __UpperCAmelCase () -> None:
'''simple docstring'''
a_ = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
a_ = np.array([[1], [2], [3]] )
assert is_hermitian(lowercase__ ), F"""{a} is not hermitian."""
print(rayleigh_quotient(lowercase__ ,lowercase__ ) )
a_ = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(lowercase__ ), F"""{a} is not hermitian."""
assert rayleigh_quotient(lowercase__ ,lowercase__ ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
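    # Hedged extension of the demo above (illustrative variable names): for a Hermitian
    # matrix the Rayleigh quotient is real and bounded by the extreme eigenvalues.
    A = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    rq = ((v.conj().T @ A @ v) / (v.conj().T @ v)).item().real
    eigs = np.linalg.eigvalsh(A)
    assert eigs.min() - 1e-9 <= rq <= eigs.max() + 1e-9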
| 685 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
def __init__( self: List[Any] , a: Optional[Any] , a: Dict=13 , a: List[str]=7 , a: Optional[Any]=True , a: int=True , a: Any=True , a: Optional[int]=True , a: int=True , a: Dict=False , a: Union[str, Any]=False , a: Dict=False , a: List[str]=2 , a: Union[str, Any]=99 , a: List[Any]=0 , a: Optional[int]=32 , a: List[str]=5 , a: int=4 , a: List[Any]=0.1 , a: Optional[int]=0.1 , a: Optional[int]=5_12 , a: str=12 , a: Dict=2 , a: Any=0.02 , a: Optional[int]=3 , a: str=4 , a: Optional[int]="last" , a: Tuple=None , a: Any=None , ) ->int:
'''simple docstring'''
a_ = parent
a_ = batch_size
a_ = seq_length
a_ = is_training
a_ = use_input_lengths
a_ = use_token_type_ids
a_ = use_labels
a_ = gelu_activation
a_ = sinusoidal_embeddings
a_ = causal
a_ = asm
a_ = n_langs
a_ = vocab_size
a_ = n_special
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = type_vocab_size
a_ = type_sequence_label_size
a_ = initializer_range
a_ = num_labels
a_ = num_choices
a_ = summary_type
a_ = use_proj
a_ = scope
def _lowerCAmelCase ( self: Tuple) ->Dict:
'''simple docstring'''
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ = random_attention_mask([self.batch_size, self.seq_length])
a_ = None
if self.use_input_lengths:
a_ = (
ids_tensor([self.batch_size] , vocab_size=2) + self.seq_length - 2
) # small variation of seq_length
a_ = None
if self.use_token_type_ids:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs)
a_ = None
a_ = None
a_ = None
if self.use_labels:
a_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a_ = ids_tensor([self.batch_size] , 2).float()
a_ = ids_tensor([self.batch_size] , self.num_choices)
a_ = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _lowerCAmelCase ( self: List[Any]) ->Any:
'''simple docstring'''
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def _lowerCAmelCase ( self: Optional[int] , a: Tuple , a: List[Any] , a: List[Any] , a: Optional[int] , a: int , a: str , a: Any , a: str , a: List[Any] , ) ->Union[str, Any]:
'''simple docstring'''
a_ = FlaubertModel(config=a)
model.to(a)
model.eval()
a_ = model(a , lengths=a , langs=a)
a_ = model(a , langs=a)
a_ = model(a)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _lowerCAmelCase ( self: Optional[int] , a: Optional[Any] , a: Dict , a: Union[str, Any] , a: Dict , a: Optional[Any] , a: Any , a: Tuple , a: str , a: List[str] , ) ->Dict:
'''simple docstring'''
a_ = FlaubertWithLMHeadModel(a)
model.to(a)
model.eval()
a_ = model(a , token_type_ids=a , labels=a)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _lowerCAmelCase ( self: Optional[int] , a: Tuple , a: Optional[Any] , a: List[Any] , a: List[str] , a: List[str] , a: List[str] , a: Optional[Any] , a: str , a: Union[str, Any] , ) ->str:
'''simple docstring'''
a_ = FlaubertForQuestionAnsweringSimple(a)
model.to(a)
model.eval()
a_ = model(a)
a_ = model(a , start_positions=a , end_positions=a)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def _lowerCAmelCase ( self: Union[str, Any] , a: List[str] , a: Tuple , a: Optional[Any] , a: Any , a: Dict , a: Any , a: Optional[int] , a: Optional[Any] , a: Union[str, Any] , ) ->int:
'''simple docstring'''
a_ = FlaubertForQuestionAnswering(a)
model.to(a)
model.eval()
a_ = model(a)
a_ = model(
a , start_positions=a , end_positions=a , cls_index=a , is_impossible=a , p_mask=a , )
a_ = model(
a , start_positions=a , end_positions=a , cls_index=a , is_impossible=a , )
((a_) , ) = result_with_labels.to_tuple()
a_ = model(a , start_positions=a , end_positions=a)
((a_) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , ())
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,))
def _lowerCAmelCase ( self: Union[str, Any] , a: List[str] , a: Tuple , a: Union[str, Any] , a: Any , a: Tuple , a: Union[str, Any] , a: int , a: int , a: Dict , ) ->Union[str, Any]:
'''simple docstring'''
a_ = FlaubertForSequenceClassification(a)
model.to(a)
model.eval()
a_ = model(a)
a_ = model(a , labels=a)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def _lowerCAmelCase ( self: str , a: List[str] , a: Dict , a: Tuple , a: Optional[Any] , a: Any , a: Any , a: str , a: str , a: Optional[Any] , ) ->List[Any]:
'''simple docstring'''
a_ = self.num_labels
a_ = FlaubertForTokenClassification(a)
model.to(a)
model.eval()
a_ = model(a , attention_mask=a , labels=a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def _lowerCAmelCase ( self: Dict , a: Tuple , a: List[Any] , a: Dict , a: Optional[Any] , a: Optional[Any] , a: Optional[Any] , a: Union[str, Any] , a: List[str] , a: Tuple , ) ->Dict:
'''simple docstring'''
a_ = self.num_choices
a_ = FlaubertForMultipleChoice(config=a)
model.to(a)
model.eval()
a_ = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ = model(
a , attention_mask=a , token_type_ids=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def _lowerCAmelCase ( self: Any) ->List[Any]:
'''simple docstring'''
a_ = self.prepare_config_and_inputs()
        a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ = config_and_inputs
a_ = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"lengths": input_lengths,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( lowercase_ , lowercase_ , unittest.TestCase ):
_UpperCAmelCase =(
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
_UpperCAmelCase =(
{
'''feature-extraction''': FlaubertModel,
'''fill-mask''': FlaubertWithLMHeadModel,
'''question-answering''': FlaubertForQuestionAnsweringSimple,
'''text-classification''': FlaubertForSequenceClassification,
'''token-classification''': FlaubertForTokenClassification,
'''zero-shot''': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def _lowerCAmelCase ( self: Optional[Any] , a: List[Any] , a: Any , a: List[str] , a: Union[str, Any] , a: int) ->int:
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast")
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _lowerCAmelCase ( self: str , a: Optional[Any] , a: List[Any] , a: Tuple=False) ->List[Any]:
'''simple docstring'''
a_ = super()._prepare_for_class(a , a , return_labels=a)
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
a_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a)
a_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a)
return inputs_dict
def _lowerCAmelCase ( self: Dict) ->Union[str, Any]:
'''simple docstring'''
a_ = FlaubertModelTester(self)
a_ = ConfigTester(self , config_class=a , emb_dim=37)
def _lowerCAmelCase ( self: List[str]) ->Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self: List[str]) ->Optional[Any]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*a)
def _lowerCAmelCase ( self: int) ->Optional[int]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*a)
def _lowerCAmelCase ( self: Optional[int]) ->Optional[Any]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*a)
def _lowerCAmelCase ( self: Any) ->Optional[int]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*a)
def _lowerCAmelCase ( self: Optional[Any]) ->Tuple:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*a)
def _lowerCAmelCase ( self: Optional[Any]) ->Union[str, Any]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*a)
def _lowerCAmelCase ( self: List[Any]) ->Dict:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*a)
@slow
def _lowerCAmelCase ( self: Any) ->Any:
'''simple docstring'''
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = FlaubertModel.from_pretrained(a)
self.assertIsNotNone(a)
@slow
@require_torch_gpu
def _lowerCAmelCase ( self: int) ->Optional[int]:
'''simple docstring'''
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
a_ = True
a_ = model_class(config=a)
a_ = self._prepare_for_class(a , a)
a_ = torch.jit.trace(
a , (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu")))
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a , os.path.join(a , "traced_model.pt"))
a_ = torch.jit.load(os.path.join(a , "traced_model.pt") , map_location=a)
loaded(inputs_dict["input_ids"].to(a) , inputs_dict["attention_mask"].to(a))
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
def _lowerCAmelCase ( self: List[Any]) ->Optional[int]:
'''simple docstring'''
a_ = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
a_ = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]])
with torch.no_grad():
a_ = model(a)[0]
a_ = torch.Size((1, 11, 7_68))
self.assertEqual(output.shape , a)
a_ = torch.tensor(
[[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , a , atol=1e-4))
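# Hedged sketch (comments only, mirroring the torch.jit check in the test class above): any
# torch module with tensor-only inputs can be traced and reloaded the same way:
#   traced = torch.jit.trace(model, (input_ids, attention_mask))
#   torch.jit.save(traced, "traced_model.pt")
#   loaded = torch.jit.load("traced_model.pt", map_location=torch_device)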
| 685 | 1 |
'''simple docstring'''
def __UpperCAmelCase (lowercase__ ,lowercase__ ) -> str:
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError("the value of both inputs must be positive" )
a_ = str(bin(lowercase__ ) )[2:] # remove the leading "0b"
a_ = str(bin(lowercase__ ) )[2:] # remove the leading "0b"
a_ = max(len(lowercase__ ) ,len(lowercase__ ) )
return "0b" + "".join(
str(int(char_a != char_b ) )
for char_a, char_b in zip(a_binary.zfill(lowercase__ ) ,b_binary.zfill(lowercase__ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
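    # Hedged self-contained sketch (the helper name _binary_xor is illustrative, not part of
    # the original module): the same zero-padded XOR via int.bit_length and format.
    def _binary_xor(a: int, b: int) -> str:
        width = max(a.bit_length(), b.bit_length())
        return "0b" + format(a ^ b, f"0{width}b")

    assert _binary_xor(25, 32) == "0b111001"  # 0b011001 ^ 0b100000 == 0b111001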
| 685 |
'''simple docstring'''
import math
def __UpperCAmelCase (lowercase__ ) -> bool:
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not prime
return False
    # All primes greater than 3 are of the form 6k ± 1
for i in range(5 ,int(math.sqrt(lowercase__ ) + 1 ) ,6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __UpperCAmelCase (lowercase__ = 10001 ) -> int:
'''simple docstring'''
try:
a_ = int(lowercase__ )
except (TypeError, ValueError):
raise TypeError("Parameter nth must be int or castable to int." ) from None
if nth <= 0:
raise ValueError("Parameter nth must be greater than or equal to one." )
a_ = []
a_ = 2
while len(lowercase__ ) < nth:
if is_prime(lowercase__ ):
primes.append(lowercase__ )
num += 1
else:
num += 1
return primes[len(lowercase__ ) - 1]
if __name__ == "__main__":
print(F'{solution() = }')
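    # Hedged sanity check (illustrative, self-contained trial division; _nth_prime is not
    # part of the original module): confirms the 6th prime is 13, matching the intent of
    # the function above.
    def _nth_prime(n: int) -> int:
        primes, num = [], 2
        while len(primes) < n:
            if all(num % p for p in primes):
                primes.append(num)
            num += 1
        return primes[-1]

    assert _nth_prime(6) == 13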
| 685 | 1 |
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
a_ = logging.get_logger()
@dataclass
class SCREAMING_SNAKE_CASE__ :
_UpperCAmelCase =42
_UpperCAmelCase =field(default_factory=lowercase_ )
_UpperCAmelCase =field(default_factory=lowercase_ )
def _lowerCAmelCase ( self: Optional[int] , a: List[Any] , a: Tensor , a: Tensor) ->Tuple:
'''simple docstring'''
a_ = len(list(m.modules())) == 1 or isinstance(a , nn.Convad) or isinstance(a , nn.BatchNormad)
if has_not_submodules:
self.traced.append(a)
def __call__( self: str , a: Tensor) ->Dict:
'''simple docstring'''
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook))
self.module(a)
[x.remove() for x in self.handles]
return self
@property
def _lowerCAmelCase ( self: Union[str, Any]) ->Optional[Any]:
'''simple docstring'''
return list(filter(lambda a: len(list(x.state_dict().keys())) > 0 , self.traced))
@dataclass
class SCREAMING_SNAKE_CASE__ :
_UpperCAmelCase =42
_UpperCAmelCase =42
_UpperCAmelCase =0
_UpperCAmelCase =field(default_factory=lowercase_ )
_UpperCAmelCase =field(default_factory=lowercase_ )
def __call__( self: List[str] , a: Tensor) ->int:
'''simple docstring'''
a_ = Tracker(self.dest)(a).parametrized
a_ = Tracker(self.src)(a).parametrized
a_ = list(filter(lambda a: type(a) not in self.src_skip , a))
a_ = list(filter(lambda a: type(a) not in self.dest_skip , a))
if len(a) != len(a):
raise Exception(
f"""Numbers of operations are different. Source module has {len(a)} operations while"""
f""" destination module has {len(a)}.""")
for dest_m, src_m in zip(a , a):
dest_m.load_state_dict(src_m.state_dict())
if self.verbose == 1:
print(f"""Transfered from={src_m} to={dest_m}""")
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ = True ) -> Union[str, Any]:
'''simple docstring'''
print(F"""Converting {name}...""" )
with torch.no_grad():
a_ = timm.create_model(lowercase__ ,pretrained=lowercase__ ).eval()
a_ = ResNetForImageClassification(lowercase__ ).eval()
a_ = ModuleTransfer(src=lowercase__ ,dest=lowercase__ )
a_ = torch.randn((1, 3, 224, 224) )
module_transfer(lowercase__ )
assert torch.allclose(from_model(lowercase__ ) ,our_model(lowercase__ ).logits ), "The model logits don't match the original one."
a_ = F"""resnet{'-'.join(name.split('resnet' ) )}"""
print(lowercase__ )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name ,commit_message="Add model" ,use_temp_dir=lowercase__ ,)
# we can use the convnext one
a_ = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name ,commit_message="Add image processor" ,use_temp_dir=lowercase__ ,)
print(F"""Pushed {checkpoint_name}""" )
def __UpperCAmelCase (lowercase__ ,lowercase__ = None ,lowercase__ = True ) -> int:
'''simple docstring'''
a_ = "imagenet-1k-id2label.json"
a_ = 1000
a_ = (1, num_labels)
a_ = "huggingface/label-files"
a_ = num_labels
a_ = json.load(open(hf_hub_download(lowercase__ ,lowercase__ ,repo_type="dataset" ) ,"r" ) )
a_ = {int(lowercase__ ): v for k, v in idalabel.items()}
a_ = idalabel
a_ = {v: k for k, v in idalabel.items()}
a_ = partial(lowercase__ ,num_labels=lowercase__ ,idalabel=lowercase__ ,labelaid=lowercase__ )
a_ = {
"resnet18": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] ,hidden_sizes=[64, 128, 256, 512] ,layer_type="basic" ),
"resnet26": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type="bottleneck" ),
"resnet34": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] ,hidden_sizes=[64, 128, 256, 512] ,layer_type="basic" ),
"resnet50": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type="bottleneck" ),
"resnet101": ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type="bottleneck" ),
"resnet152": ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type="bottleneck" ),
}
if model_name:
convert_weight_and_push(lowercase__ ,names_to_config[model_name] ,lowercase__ ,lowercase__ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
return config, expected_shape
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help=(
        'The name of the model you wish to convert; it must be one of the supported resnet* architectures,'
        ' currently: resnet18,26,34,50,101,152. If `None`, all of them will be converted.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=Path,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
default=True,
type=bool,
required=False,
help='If True, push model and image processor to the hub.',
)
a_ = parser.parse_args()
a_ = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
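    # Hedged CLI usage sketch (the script and path names are illustrative placeholders):
    #   python convert_resnet_to_pytorch.py --model_name resnet50 --pytorch_dump_folder_path ./out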
| 685 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'uclanlp/visualbert-vqa': 'https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json',
'uclanlp/visualbert-vqa-pre': 'https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json',
'uclanlp/visualbert-vqa-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-vcr': 'https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json',
'uclanlp/visualbert-vcr-pre': 'https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json',
'uclanlp/visualbert-vcr-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-nlvr2': 'https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-pre': 'https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_UpperCAmelCase ='''visual_bert'''
def __init__( self: Union[str, Any] , a: List[Any]=3_05_22 , a: List[Any]=7_68 , a: Union[str, Any]=5_12 , a: List[str]=12 , a: Tuple=12 , a: Optional[Any]=30_72 , a: int="gelu" , a: Union[str, Any]=0.1 , a: int=0.1 , a: str=5_12 , a: Optional[int]=2 , a: List[str]=0.02 , a: Optional[int]=1e-12 , a: str=False , a: Any=True , a: Tuple=1 , a: Dict=0 , a: Any=2 , **a: Optional[Any] , ) ->str:
'''simple docstring'''
super().__init__(pad_token_id=a , bos_token_id=a , eos_token_id=a , **a)
a_ = vocab_size
a_ = max_position_embeddings
a_ = hidden_size
a_ = visual_embedding_dim
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = initializer_range
a_ = type_vocab_size
a_ = layer_norm_eps
a_ = bypass_transformer
a_ = special_visual_initialize
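# Hedged usage sketch, assuming this class mirrors transformers.VisualBertConfig (keyword
# names below come from the documented upstream API):
#   from transformers import VisualBertConfig
#   config = VisualBertConfig(visual_embedding_dim=512, hidden_size=768)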
| 685 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __init__( self: Dict , a: Union[str, Any] , a: int=13 , a: Any=7 , a: int=True , a: List[Any]=True , a: int=True , a: Optional[int]=True , a: List[Any]=99 , a: Tuple=32 , a: List[str]=5 , a: Union[str, Any]=4 , a: Tuple=37 , a: Optional[Any]="gelu" , a: Any=0.1 , a: List[Any]=0.1 , a: List[Any]=5_12 , a: List[str]=16 , a: Optional[Any]=2 , a: str=0.02 , a: Optional[int]=4 , ) ->Union[str, Any]:
'''simple docstring'''
a_ = parent
a_ = batch_size
a_ = seq_length
a_ = is_training
a_ = use_attention_mask
a_ = use_token_type_ids
a_ = use_labels
a_ = vocab_size
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = type_vocab_size
a_ = type_sequence_label_size
a_ = initializer_range
a_ = num_choices
def _lowerCAmelCase ( self: Union[str, Any]) ->Any:
'''simple docstring'''
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ = None
if self.use_attention_mask:
a_ = random_attention_mask([self.batch_size, self.seq_length])
a_ = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=a , )
return config, input_ids, attention_mask
def _lowerCAmelCase ( self: Union[str, Any]) ->List[str]:
'''simple docstring'''
a_ = self.prepare_config_and_inputs()
a_ , a_ , a_ = config_and_inputs
a_ = {"input_ids": input_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class SCREAMING_SNAKE_CASE__ ( lowercase_ , unittest.TestCase ):
_UpperCAmelCase =(
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _lowerCAmelCase ( self: int) ->Optional[Any]:
'''simple docstring'''
a_ = FlaxDistilBertModelTester(self)
@slow
def _lowerCAmelCase ( self: Union[str, Any]) ->Dict:
'''simple docstring'''
for model_class_name in self.all_model_classes:
a_ = model_class_name.from_pretrained("distilbert-base-uncased")
a_ = model(np.ones((1, 1)))
self.assertIsNotNone(a)
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
def _lowerCAmelCase ( self: Any) ->str:
'''simple docstring'''
a_ = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
a_ = np.array([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]])
a_ = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
a_ = model(a , attention_mask=a)[0]
a_ = (1, 11, 7_68)
self.assertEqual(output.shape , a)
a_ = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , a , atol=1e-4))
| 685 |
'''simple docstring'''
from heapq import heappop, heappush
import numpy as np
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,) -> tuple[float | int, list[tuple[int, int]]]:
'''simple docstring'''
a_ , a_ = grid.shape
a_ = [-1, 1, 0, 0]
a_ = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
a_ , a_ = [(0, source)], set()
a_ = np.full((rows, cols) ,np.inf )
a_ = 0
a_ = np.empty((rows, cols) ,dtype=lowercase__ )
a_ = None
while queue:
((a_) , (a_)) = heappop(lowercase__ )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
a_ = []
while (x, y) != source:
path.append((x, y) )
a_ , a_ = predecessors[x, y]
path.append(lowercase__ ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(lowercase__ ) ):
a_ , a_ = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
a_ = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(lowercase__ ,(dist + 1, (nx, ny)) )
a_ = dist + 1
a_ = (x, y)
return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
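    # Hedged worked example (comments only, since the transformed signature above is not
    # directly callable): on grid = np.array([[1, 1, 1], [0, 0, 1], [1, 1, 1]]), where 1
    # marks passable cells, the shortest 4-connected path from (0, 0) to (2, 0) detours
    # through (0, 2) and (2, 2), giving distance 6.0 and a 7-cell path.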
| 685 | 1 |
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def __UpperCAmelCase () -> str:
'''simple docstring'''
a_ = ArgumentParser(
description=(
"PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
) )
# Optional arguments for the launch helper
parser.add_argument("--num_cores" ,type=lowercase__ ,default=1 ,help="Number of TPU cores to use (1 or 8)." )
# positional
parser.add_argument(
"training_script" ,type=lowercase__ ,help=(
"The full path to the single TPU training "
"program/script to be launched in parallel, "
"followed by all the arguments for the "
"training script"
) ,)
# rest from the training program
parser.add_argument("training_script_args" ,nargs=lowercase__ )
return parser.parse_args()
def __UpperCAmelCase () -> List[Any]:
'''simple docstring'''
a_ = parse_args()
# Import training_script as a module.
a_ = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
a_ = script_fpath.stem
a_ = importlib.import_module(lowercase__ )
# Patch sys.argv
a_ = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores )]
xmp.spawn(mod._mp_fn ,args=() ,nprocs=args.num_cores )
if __name__ == "__main__":
main()
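    # Hedged CLI usage sketch (script and training-file names are illustrative):
    #   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased ...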
| 685 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ) -> Any:
'''simple docstring'''
with open(lowercase__ ) as metadata_file:
a_ = json.load(lowercase__ )
a_ = LukeConfig(use_entity_aware_attention=lowercase__ ,**metadata["model_config"] )
# Load in the weights from the checkpoint_path
a_ = torch.load(lowercase__ ,map_location="cpu" )["module"]
# Load the entity vocab file
a_ = load_original_entity_vocab(lowercase__ )
# add an entry for [MASK2]
a_ = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
a_ = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
a_ = AddedToken("<ent>" ,lstrip=lowercase__ ,rstrip=lowercase__ )
a_ = AddedToken("<ent2>" ,lstrip=lowercase__ ,rstrip=lowercase__ )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(lowercase__ )
with open(os.path.join(lowercase__ ,"tokenizer_config.json" ) ,"r" ) as f:
a_ = json.load(lowercase__ )
a_ = "MLukeTokenizer"
with open(os.path.join(lowercase__ ,"tokenizer_config.json" ) ,"w" ) as f:
json.dump(lowercase__ ,lowercase__ )
with open(os.path.join(lowercase__ ,MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) ,"w" ) as f:
json.dump(lowercase__ ,lowercase__ )
a_ = MLukeTokenizer.from_pretrained(lowercase__ )
# Initialize the embeddings of the special tokens
a_ = tokenizer.convert_tokens_to_ids(["@"] )[0]
a_ = tokenizer.convert_tokens_to_ids(["#"] )[0]
a_ = state_dict["embeddings.word_embeddings.weight"]
a_ = word_emb[ent_init_index].unsqueeze(0 )
a_ = word_emb[enta_init_index].unsqueeze(0 )
a_ = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
a_ = state_dict[bias_name]
a_ = decoder_bias[ent_init_index].unsqueeze(0 )
a_ = decoder_bias[enta_init_index].unsqueeze(0 )
a_ = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
a_ = F"""encoder.layer.{layer_index}.attention.self."""
a_ = state_dict[prefix + matrix_name]
a_ = state_dict[prefix + matrix_name]
a_ = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
a_ = state_dict["entity_embeddings.entity_embeddings.weight"]
a_ = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
a_ = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
a_ = state_dict["entity_predictions.bias"]
a_ = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
a_ = torch.cat([entity_prediction_bias, entity_mask_bias] )
a_ = LukeForMaskedLM(config=lowercase__ ).eval()
state_dict.pop("entity_predictions.decoder.weight" )
state_dict.pop("lm_head.decoder.weight" )
state_dict.pop("lm_head.decoder.bias" )
a_ = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )):
a_ = state_dict[key]
else:
a_ = state_dict[key]
a_ , a_ = model.load_state_dict(lowercase__ ,strict=lowercase__ )
if set(lowercase__ ) != {"luke.embeddings.position_ids"}:
raise ValueError(F"""Unexpected unexpected_keys: {unexpected_keys}""" )
if set(lowercase__ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F"""Unexpected missing_keys: {missing_keys}""" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
a_ = MLukeTokenizer.from_pretrained(lowercase__ ,task="entity_classification" )
a_ = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
a_ = (0, 9)
a_ = tokenizer(lowercase__ ,entity_spans=[span] ,return_tensors="pt" )
a_ = model(**lowercase__ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
a_ = torch.Size((1, 33, 768) )
a_ = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] ,lowercase__ ,atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
a_ = torch.Size((1, 1, 768) )
a_ = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
F""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] ,lowercase__ ,atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
a_ = MLukeTokenizer.from_pretrained(lowercase__ )
a_ = "Tokyo is the capital of <mask>."
a_ = (24, 30)
a_ = tokenizer(lowercase__ ,entity_spans=[span] ,return_tensors="pt" )
a_ = model(**lowercase__ )
a_ = encoding["input_ids"][0].tolist()
a_ = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) )
a_ = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(lowercase__ )
a_ = outputs.entity_logits[0][0].argmax().item()
a_ = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(lowercase__ ) )
model.save_pretrained(lowercase__ )
def __UpperCAmelCase (lowercase__ ) -> Any:
'''simple docstring'''
a_ = ["[MASK]", "[PAD]", "[UNK]"]
a_ = [json.loads(lowercase__ ) for line in open(lowercase__ )]
a_ = {}
for entry in data:
a_ = entry["id"]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
a_ = entity_id
break
a_ = F"""{language}:{entity_name}"""
a_ = entity_id
return new_mapping
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
a_ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
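    # Hedged CLI usage sketch (all paths are illustrative placeholders):
    #   python convert_mluke_checkpoint.py --checkpoint_path ./mluke.bin \
    #       --metadata_path ./metadata.json --entity_vocab_path ./entity_vocab.jsonl \
    #       --pytorch_dump_folder_path ./converted-mluke --model_size base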
| 685 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
a_ = {
'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['VivitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'VivitModel',
'VivitPreTrainedModel',
'VivitForVideoClassification',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 685 |
'''simple docstring'''
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( lowercase_ , unittest.TestCase ):
_UpperCAmelCase =LxmertTokenizer
_UpperCAmelCase =LxmertTokenizerFast
_UpperCAmelCase =True
_UpperCAmelCase =True
def _lowerCAmelCase ( self: Dict) ->int:
'''simple docstring'''
super().setUp()
a_ = [
"[UNK]",
"[CLS]",
"[SEP]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
a_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
def _lowerCAmelCase ( self: Optional[Any] , a: Dict) ->Optional[Any]:
'''simple docstring'''
a_ = "UNwant\u00E9d,running"
a_ = "unwanted, running"
return input_text, output_text
def _lowerCAmelCase ( self: List[Any]) ->Optional[int]:
'''simple docstring'''
a_ = self.tokenizer_class(self.vocab_file)
a_ = tokenizer.tokenize("UNwant\u00E9d,running")
self.assertListEqual(a , ["un", "##want", "##ed", ",", "runn", "##ing"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(a) , [7, 4, 5, 10, 8, 9])
def _lowerCAmelCase ( self: List[Any]) ->Any:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
a_ = self.get_tokenizer()
a_ = self.get_rust_tokenizer()
a_ = "I was born in 92000, and this is falsé."
a_ = tokenizer.tokenize(a)
a_ = rust_tokenizer.tokenize(a)
self.assertListEqual(a , a)
a_ = tokenizer.encode(a , add_special_tokens=a)
a_ = rust_tokenizer.encode(a , add_special_tokens=a)
self.assertListEqual(a , a)
a_ = self.get_rust_tokenizer()
a_ = tokenizer.encode(a)
a_ = rust_tokenizer.encode(a)
self.assertListEqual(a , a)
| 685 | 1 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
a_ = r'\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `" / "`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `" // "`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `"wiki_dpr"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `"train"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `"compressed"`)\n The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and\n `"compressed"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a "dummy" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n'
@add_start_docstrings(lowercase_ )
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_UpperCAmelCase ='''rag'''
_UpperCAmelCase =True
def __init__( self: Tuple , a: Tuple=None , a: Tuple=True , a: Any=None , a: Tuple=None , a: Union[str, Any]=None , a: int=None , a: Optional[Any]=None , a: Optional[int]=" / " , a: Optional[int]=" // " , a: Any=5 , a: Union[str, Any]=3_00 , a: str=7_68 , a: List[str]=8 , a: str="wiki_dpr" , a: Union[str, Any]="train" , a: List[Any]="compressed" , a: List[str]=None , a: Any=None , a: str=False , a: Dict=False , a: List[str]=0.0 , a: List[str]=True , a: Tuple=False , a: str=False , a: List[Any]=False , a: Union[str, Any]=True , a: str=None , **a: Any , ) ->Optional[Any]:
'''simple docstring'''
super().__init__(
bos_token_id=a , pad_token_id=a , eos_token_id=a , decoder_start_token_id=a , forced_eos_token_id=a , is_encoder_decoder=a , prefix=a , vocab_size=a , **a , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
a_ = kwargs.pop("question_encoder")
a_ = question_encoder_config.pop("model_type")
a_ = kwargs.pop("generator")
a_ = decoder_config.pop("model_type")
from ..auto.configuration_auto import AutoConfig
a_ = AutoConfig.for_model(a , **a)
a_ = AutoConfig.for_model(a , **a)
a_ = reduce_loss
a_ = label_smoothing
a_ = exclude_bos_score
a_ = do_marginalize
a_ = title_sep
a_ = doc_sep
a_ = n_docs
a_ = max_combined_length
a_ = dataset
a_ = dataset_split
a_ = index_name
a_ = retrieval_vector_size
a_ = retrieval_batch_size
a_ = passages_path
a_ = index_path
a_ = use_dummy_dataset
a_ = output_retrieved
a_ = do_deduplication
a_ = use_cache
if self.forced_eos_token_id is None:
a_ = getattr(self.generator , "forced_eos_token_id" , a)
@classmethod
def _lowerCAmelCase ( cls: Optional[int] , a: PretrainedConfig , a: PretrainedConfig , **a: List[str]) ->PretrainedConfig:
'''simple docstring'''
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **a)
def _lowerCAmelCase ( self: List[Any]) ->Tuple:
'''simple docstring'''
a_ = copy.deepcopy(self.__dict__)
a_ = self.question_encoder.to_dict()
a_ = self.generator.to_dict()
a_ = self.__class__.model_type
return output
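# Hedged usage sketch, assuming this mirrors transformers.RagConfig; the classmethod above
# corresponds to the upstream from_question_encoder_generator_configs helper:
#   from transformers import BartConfig, DPRConfig, RagConfig
#   rag_config = RagConfig.from_question_encoder_generator_configs(DPRConfig(), BartConfig(), n_docs=5)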
| 685 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
a_ = {
'configuration_cpmant': ['CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CpmAntConfig'],
'tokenization_cpmant': ['CpmAntTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST',
'CpmAntForCausalLM',
'CpmAntModel',
'CpmAntPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 685 | 1 |
'''simple docstring'''
from string import ascii_lowercase, ascii_uppercase
def __UpperCAmelCase (lowercase__ ) -> str:
'''simple docstring'''
if not sentence:
return ""
    a_ = dict(zip(ascii_lowercase ,ascii_uppercase ) )
return lower_to_upper.get(sentence[0] ,sentence[0] ) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
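    # Worked example (illustrative): with the lower-to-upper mapping built from
    # ascii_lowercase/ascii_uppercase, "hello world" capitalizes to "Hello world"
    # and an empty string stays "".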
| 685 |
'''simple docstring'''
import re
def __UpperCAmelCase (lowercase__ ) -> bool:
'''simple docstring'''
a_ = re.compile(
r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$" )
return bool(re.search(lowercase__ ,lowercase__ ) )
if __name__ == "__main__":
a_ = '0094702343221'
print(is_sri_lankan_phone_number(phone))
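    # Hedged examples (illustrative): the pattern above accepts Sri Lankan mobile numbers
    # such as "0094702343221", "+94717800522", and "075 2345678", and rejects "0797800522"
    # because 9 is not a valid second digit after the leading 7.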
| 685 | 1 |
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
# TODO Update this
a_ = {
'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_UpperCAmelCase ='''esm'''
def __init__( self: int , a: int=None , a: List[Any]=None , a: Tuple=None , a: Dict=7_68 , a: Tuple=12 , a: Union[str, Any]=12 , a: int=30_72 , a: Tuple=0.1 , a: List[Any]=0.1 , a: Optional[int]=10_26 , a: int=0.02 , a: List[Any]=1e-12 , a: str="absolute" , a: Any=True , a: Tuple=None , a: Dict=False , a: Any=False , a: Any=None , a: Optional[Any]=None , **a: Optional[int] , ) ->Any:
'''simple docstring'''
super().__init__(pad_token_id=a , mask_token_id=a , **a)
a_ = vocab_size
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = initializer_range
a_ = layer_norm_eps
a_ = position_embedding_type
a_ = use_cache
a_ = emb_layer_norm_before
a_ = token_dropout
a_ = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("No esmfold_config supplied for folding model, using default values.")
a_ = EsmFoldConfig()
elif isinstance(a , a):
a_ = EsmFoldConfig(**a)
a_ = esmfold_config
if vocab_list is None:
logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
a_ = get_default_vocab_list()
else:
a_ = vocab_list
else:
a_ = None
a_ = None
if self.esmfold_config is not None and getattr(self.esmfold_config , "use_esm_attn_map" , a):
raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")
def _lowerCAmelCase ( self: List[str]) ->Dict:
'''simple docstring'''
a_ = super().to_dict()
if isinstance(self.esmfold_config , a):
a_ = self.esmfold_config.to_dict()
return output
@dataclass
class SCREAMING_SNAKE_CASE__ :
_UpperCAmelCase =None
_UpperCAmelCase =True
_UpperCAmelCase =False
_UpperCAmelCase =False
_UpperCAmelCase =False
_UpperCAmelCase =0
_UpperCAmelCase =True
_UpperCAmelCase =False
_UpperCAmelCase =128
_UpperCAmelCase =None
def _lowerCAmelCase ( self: Dict) ->Dict:
'''simple docstring'''
if self.trunk is None:
a_ = TrunkConfig()
elif isinstance(self.trunk , a):
a_ = TrunkConfig(**self.trunk)
def _lowerCAmelCase ( self: Optional[Any]) ->List[Any]:
'''simple docstring'''
a_ = asdict(self)
a_ = self.trunk.to_dict()
return output
@dataclass
class SCREAMING_SNAKE_CASE__ :
_UpperCAmelCase =48
_UpperCAmelCase =1024
_UpperCAmelCase =128
_UpperCAmelCase =32
_UpperCAmelCase =32
_UpperCAmelCase =32
_UpperCAmelCase =0
_UpperCAmelCase =0
_UpperCAmelCase =False
_UpperCAmelCase =4
_UpperCAmelCase =128
_UpperCAmelCase =None
def _lowerCAmelCase ( self: Union[str, Any]) ->str:
'''simple docstring'''
if self.structure_module is None:
a_ = StructureModuleConfig()
elif isinstance(self.structure_module , a):
a_ = StructureModuleConfig(**self.structure_module)
if self.max_recycles <= 0:
raise ValueError(f"""`max_recycles` should be positive, got {self.max_recycles}.""")
if self.sequence_state_dim % self.sequence_state_dim != 0:
raise ValueError(
"`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got"
f""" {self.sequence_state_dim} and {self.sequence_state_dim}.""")
if self.pairwise_state_dim % self.pairwise_state_dim != 0:
raise ValueError(
"`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got"
f""" {self.pairwise_state_dim} and {self.pairwise_state_dim}.""")
a_ = self.sequence_state_dim // self.sequence_head_width
a_ = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
f""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""")
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
f""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""")
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""")
if self.dropout >= 0.4:
raise ValueError(f"""`dropout` should not be greater than 0.4, got {self.dropout}.""")
def _lowerCAmelCase ( self: Tuple) ->Dict:
'''simple docstring'''
a_ = asdict(self)
a_ = self.structure_module.to_dict()
return output
@dataclass
class SCREAMING_SNAKE_CASE__ :
_UpperCAmelCase =384
_UpperCAmelCase =128
_UpperCAmelCase =16
_UpperCAmelCase =128
_UpperCAmelCase =12
_UpperCAmelCase =4
_UpperCAmelCase =8
_UpperCAmelCase =0.1
_UpperCAmelCase =8
_UpperCAmelCase =1
_UpperCAmelCase =2
_UpperCAmelCase =7
_UpperCAmelCase =10
_UpperCAmelCase =1e-8
_UpperCAmelCase =1e5
def _lowerCAmelCase ( self: Union[str, Any]) ->Any:
'''simple docstring'''
return asdict(self)
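# Standalone sketch of the serialization pattern shared by the configs above:
# dataclasses.asdict() recurses into nested dataclass fields, and each parent
# additionally re-serializes dataclass children through the child's own
# to_dict(). The toy names below are illustrative, not the real ESM configs.
from dataclasses import asdict, dataclass, field
@dataclass
class _ToyChild:
    depth: int = 2
@dataclass
class _ToyParent:
    child: _ToyChild = field(default_factory=_ToyChild)
    def to_dict(self) -> dict:
        return asdict(self)  # -> {"child": {"depth": 2}}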
def __UpperCAmelCase () -> Optional[int]:
'''simple docstring'''
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
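# The tuple above is the 33-symbol ESM-2 alphabet: four leading special
# tokens, the 20 standard amino acids plus the extra codes X, B, U, Z and O,
# the gap characters "." and "-", and the trailing null / mask tokens.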
| 685 |
'''Sort the entries of the auto mappings under src/transformers/models/auto.'''
import argparse
import os
import re
PATH_TO_AUTO_MODULE = 'src/transformers/models/auto'
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')
def sort_auto_mapping(fname, overwrite=False):
    '''Sort one file's mappings; return True when a check-only run finds changes.'''
    with open(fname ,"r" ,encoding="utf-8" ) as f:
        content = f.read()
    lines = content.split("\n" )
    new_lines = []
    line_idx = 0
    while line_idx < len(lines ):
        if _re_intro_mapping.search(lines[line_idx] ) is not None:
            indent = len(re.search(r"^(\s*)\S" ,lines[line_idx] ).groups()[0] ) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "(" ):
                new_lines.append(lines[line_idx] )
                line_idx += 1
            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")" ):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1] ) )
                else:
                    blocks.append(lines[line_idx] )
                line_idx += 1
            # Sort blocks by their identifiers
            blocks = sorted(blocks ,key=lambda block : _re_identifier.search(block ).groups()[0] )
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx] )
            line_idx += 1
    if overwrite:
        with open(fname ,"w" ,encoding="utf-8" ) as f:
            f.write("\n".join(new_lines ) )
    elif "\n".join(new_lines ) != content:
        return True
def sort_all_auto_mappings(overwrite=False):
    '''Sort every python file in the auto module; raise when a check-only run finds diffs.'''
    fnames = [os.path.join(PATH_TO_AUTO_MODULE ,f ) for f in os.listdir(PATH_TO_AUTO_MODULE ) if f.endswith(".py" )]
    diffs = [sort_auto_mapping(fname ,overwrite=overwrite ) for fname in fnames]
    if not overwrite and any(diffs ):
        failures = [f for f, d in zip(fnames ,diffs ) if d]
        raise ValueError(
            F"""The following files have auto mappings that need sorting: {', '.join(failures )}. Run `make style` to fix"""
            " this." )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
    args = parser.parse_args()
    sort_all_auto_mappings(not args.check_only)
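# Illustrative behavior on a toy mapping (hypothetical entries):
#
#     MODEL_MAPPING_NAMES = OrderedDict(
#         [
#             ("bert", "BertModel"),
#             ("albert", "AlbertModel"),
#         ]
#     )
#
# _re_identifier pulls "bert" / "albert" out of each block, so the sorted()
# call rewrites the entries in alphabetical order of their first quoted name.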
| 685 | 1 |
'''simple docstring'''
def hex_to_bin(hex_num: str) -> int:
    '''
    Convert a hexadecimal value (optionally signed) to an int made of its
    binary digits.
    >>> hex_to_bin("AC")
    10101100
    >>> hex_to_bin(" 12f ")
    100101111
    >>> hex_to_bin("-fFfF")
    -1111111111111111
    '''
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function" )
    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]
    try:
        int_num = int(hex_num ,16 )
    except ValueError:
        raise ValueError("Invalid value was passed to the function" )
    if int_num == 0:
        return 0  # without this guard, "0" would reach int("") below
    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2 ) + bin_str
        int_num >>= 1
    return int(("-" + bin_str) if is_negative else bin_str )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 685 |
'''simple docstring'''
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class SCREAMING_SNAKE_CASE__ ( lowercase_ , lowercase_ ):
_UpperCAmelCase ='''pixel_values'''
_UpperCAmelCase =False
_UpperCAmelCase =TimmBackboneConfig
def __init__( self: Union[str, Any] , a: Union[str, Any] , **a: Tuple) ->Optional[Any]:
'''simple docstring'''
requires_backends(self , "timm")
super().__init__(a)
a_ = config
if config.backbone is None:
raise ValueError("backbone is not set in the config. Please set it to a timm model name.")
if config.backbone not in timm.list_models():
raise ValueError(f"""backbone {config.backbone} is not supported by timm.""")
if hasattr(a , "out_features") and config.out_features is not None:
raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")
a_ = getattr(a , "use_pretrained_backbone" , a)
if pretrained is None:
raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")
# We just take the final layer by default. This matches the default for the transformers models.
a_ = config.out_indices if getattr(a , "out_indices" , a) is not None else (-1,)
a_ = timm.create_model(
config.backbone , pretrained=a , features_only=config.features_only , in_chans=config.num_channels , out_indices=a , **a , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
a_ = self._backbone.return_layers
a_ = {layer["module"]: str(a) for i, layer in enumerate(self._backbone.feature_info.info)}
super()._init_backbone(a)
@classmethod
def _lowerCAmelCase ( cls: Tuple , a: Optional[Any] , *a: Optional[Any] , **a: str) ->List[Any]:
'''simple docstring'''
requires_backends(cls , ["vision", "timm"])
from ...models.timm_backbone import TimmBackboneConfig
a_ = kwargs.pop("config" , TimmBackboneConfig())
a_ = kwargs.pop("use_timm_backbone" , a)
if not use_timm:
raise ValueError("use_timm_backbone must be True for timm backbones")
a_ = kwargs.pop("num_channels" , config.num_channels)
a_ = kwargs.pop("features_only" , config.features_only)
a_ = kwargs.pop("use_pretrained_backbone" , config.use_pretrained_backbone)
a_ = kwargs.pop("out_indices" , config.out_indices)
a_ = TimmBackboneConfig(
backbone=a , num_channels=a , features_only=a , use_pretrained_backbone=a , out_indices=a , )
return super()._from_config(a , **a)
def _lowerCAmelCase ( self: Optional[Any] , a: Optional[int]) ->str:
'''simple docstring'''
pass
def _lowerCAmelCase ( self: Tuple , a: List[Any] , a: Any=None , a: Dict=None , a: Optional[int]=None , **a: int) ->Union[BackboneOutput, Tuple[Tensor, ...]]:
'''simple docstring'''
a_ = return_dict if return_dict is not None else self.config.use_return_dict
a_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
a_ = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError("Cannot output attentions for timm backbones at the moment")
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
a_ = self._all_layers
a_ = self._backbone(a , **a)
a_ = self._return_layers
a_ = tuple(hidden_states[i] for i in self.out_indices)
else:
a_ = self._backbone(a , **a)
a_ = None
a_ = tuple(a)
a_ = tuple(a) if hidden_states is not None else None
if not return_dict:
a_ = (feature_maps,)
if output_hidden_states:
a_ = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=a , hidden_states=a , attentions=a)
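# Hedged usage sketch (assumes the `transformers` and `timm` packages are
# installed; the checkpoint name and kwargs are illustrative and the exact
# public entry point may differ):
#
#     from transformers import TimmBackbone
#     backbone = TimmBackbone.from_pretrained("resnet18", use_timm_backbone=True)
#     outputs = backbone(pixel_values)        # pixel_values: (batch, 3, H, W)
#     feature_maps = outputs.feature_maps     # one map per requested out_index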
| 685 | 1 |
'''simple docstring'''
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('ignore', category=UserWarning, module='torch.optim.lr_scheduler')
class SCREAMING_SNAKE_CASE__ :
def __init__( self: List[str] , a: Optional[Any] , a: Union[str, Any] , a: bool = True , a: bool = False) ->Any:
'''simple docstring'''
a_ = scheduler
a_ = optimizers if isinstance(a , (list, tuple)) else [optimizers]
a_ = split_batches
a_ = step_with_optimizer
a_ = GradientState()
def _lowerCAmelCase ( self: Optional[Any] , *a: str , **a: Optional[int]) ->Optional[Any]:
'''simple docstring'''
if not self.step_with_optimizer:
# No link between scheduler and optimizer -> just step
self.scheduler.step(*a , **a)
return
# Otherwise, first make sure the optimizer was stepped.
if not self.gradient_state.sync_gradients:
if self.gradient_state.adjust_scheduler:
self.scheduler._step_count += 1
return
for opt in self.optimizers:
if opt.step_was_skipped:
return
if self.split_batches:
# Split batches -> the training dataloader batch size is not changed so one step per training step
self.scheduler.step(*a , **a)
else:
# Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
# num_processes steps per training step
a_ = AcceleratorState().num_processes
for _ in range(a):
# Special case when using OneCycle and `drop_last` was not used
if hasattr(self.scheduler , "total_steps"):
if self.scheduler._step_count <= self.scheduler.total_steps:
self.scheduler.step(*a , **a)
else:
self.scheduler.step(*a , **a)
def _lowerCAmelCase ( self: str) ->Union[str, Any]:
'''simple docstring'''
return self.scheduler.get_last_lr()
def _lowerCAmelCase ( self: Dict) ->List[Any]:
'''simple docstring'''
return self.scheduler.state_dict()
def _lowerCAmelCase ( self: List[str] , a: List[str]) ->Optional[int]:
'''simple docstring'''
self.scheduler.load_state_dict(a)
def _lowerCAmelCase ( self: Tuple) ->int:
'''simple docstring'''
return self.scheduler.get_lr()
def _lowerCAmelCase ( self: Dict , *a: int , **a: Any) ->Optional[int]:
'''simple docstring'''
return self.scheduler.print_lr(*a , **a)
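# Hedged usage sketch (the objects below are hypothetical; this wrapper is
# accelerate's AcceleratedScheduler, which only advances the LR schedule when
# the optimizer actually stepped, i.e. outside gradient-accumulation steps):
#
#     wrapped = AcceleratedScheduler(lr_scheduler, optimizers=optimizer,
#                                    step_with_optimizer=True)
#     wrapped.step()   # no-op while gradients are still being accumulated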
| 685 |
'''simple docstring'''
class OverFlowError(Exception):
    pass
class UnderFlowError(Exception):
    pass
class FixedPriorityQueue:
    def __init__(self) -> None:
        '''Three FIFO queues, one per priority level (0 is the highest).'''
        self.queues: list[list[int]] = [
            [],
            [],
            [],
        ]
    def enqueue(self, priority: int, data: int) -> None:
        '''Add data to the queue of the given priority.'''
        try:
            if len(self.queues[priority]) >= 100:
                raise OverFlowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")
    def dequeue(self) -> int:
        '''Return the first element of the highest-priority non-empty queue.'''
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")
    def __str__(self) -> str:
        '''Render one line per priority level.'''
        return "\n".join(f"""Priority {i}: {q}""" for i, q in enumerate(self.queues))
class ElementPriorityQueue:
    def __init__(self) -> None:
        '''A single queue; the smallest element has the highest priority.'''
        self.queue: list[int] = []
    def enqueue(self, data: int) -> None:
        '''Add data to the queue.'''
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)
    def dequeue(self) -> int:
        '''Remove and return the smallest element in the queue.'''
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        data = min(self.queue)
        self.queue.remove(data)
        return data
    def __str__(self) -> str:
        '''Render the queue contents.'''
        return str(self.queue)
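# Note on the demos below: each enqueues nine values and then calls dequeue
# ten times, so the tenth dequeue raises UnderFlowError once the queue is
# exhausted. Expected fixed-priority order: 10, 100, 128 (priority 0), then
# 70, 7, 64 (priority 1), then 1, 5, 4 (priority 2); the element queue
# dequeues in ascending order: 1, 4, 5, 7, 10, 64, 70, 100, 128.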
def fixed_priority_queue() -> None:
    '''Demonstrate the FixedPriorityQueue.'''
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
def element_priority_queue() -> None:
    '''Demonstrate the ElementPriorityQueue.'''
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 685 | 1 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
a_ = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def _lowerCAmelCase ( self: Any , a: Path , a: Union[str, None] = None , a: Union[List[str], None] = None , a: Union[str, List[str], None] = None , a: bool = True , ) ->Optional[Any]:
'''simple docstring'''
a_ = [file for file in os.listdir(a) if os.path.isfile(os.path.join(a , a))]
if identifier is not None:
a_ = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(a , a):
for n_ in n_identifier:
a_ = [file for file in files if n_ not in file]
else:
a_ = [file for file in files if n_identifier not in file]
a_ = ignore_files or []
ignore_files.append("__init__.py")
a_ = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print("Testing" , a)
if only_modules:
a_ = file.split(".")[0]
try:
a_ = getattr(a , a)
a_ = doctest.DocTestSuite(a)
a_ = unittest.TextTestRunner().run(a)
self.assertIs(len(result.failures) , 0)
except AttributeError:
logger.info(f"""{module_identifier} is not a module.""")
else:
a_ = doctest.testfile(str(".." / directory / file) , optionflags=doctest.ELLIPSIS)
self.assertIs(result.failed , 0)
def _lowerCAmelCase ( self: Dict) ->Tuple:
'''simple docstring'''
a_ = Path("src/transformers")
a_ = "modeling"
a_ = [
"modeling_ctrl.py",
"modeling_tf_ctrl.py",
]
self.analyze_directory(a , identifier=a , ignore_files=a)
def _lowerCAmelCase ( self: int) ->Dict:
'''simple docstring'''
a_ = Path("src/transformers")
a_ = "tokenization"
self.analyze_directory(a , identifier=a)
def _lowerCAmelCase ( self: List[Any]) ->Optional[int]:
'''simple docstring'''
a_ = Path("src/transformers")
a_ = "configuration"
self.analyze_directory(a , identifier=a)
def _lowerCAmelCase ( self: Union[str, Any]) ->Any:
'''simple docstring'''
a_ = Path("src/transformers")
a_ = ["configuration", "modeling", "tokenization"]
self.analyze_directory(a , n_identifier=a)
def _lowerCAmelCase ( self: Optional[int]) ->Tuple:
'''simple docstring'''
a_ = Path("docs/source")
a_ = ["favicon.ico"]
self.analyze_directory(a , ignore_files=a , only_modules=a)
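# analyze_directory (above) filters the files in `directory` by identifier /
# n_identifier, then for each survivor either runs doctest.DocTestSuite on the
# imported module (only_modules=True) or doctest.testfile on the source file,
# and asserts that no doctest fails.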
| 685 |
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def __UpperCAmelCase () -> Optional[Any]:
'''simple docstring'''
a_ = {
"repo_name": ["test_repo1", "test_repo2", "test_repo3"],
"path": ["test_1.py", "test_2.py", "unit_test.py"],
"content": ["a " * 20, "a " * 30, "b " * 7],
}
a_ = Dataset.from_dict(lowercase__ )
return dataset
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
def _lowerCAmelCase ( self: Union[str, Any]) ->Optional[int]:
'''simple docstring'''
a_ = get_dataset()
a_ = make_duplicate_clusters(a , 0.85)
self.assertEqual(len(duplicate_clusters[0]) , 2)
def _lowerCAmelCase ( self: Any) ->Dict:
'''simple docstring'''
a_ = get_dataset()
a_ , a_ = deduplicate_dataset(a)
self.assertEqual(len(a) , 2)
print(a)
self.assertEqual(duplicate_clusters[0][0]["copies"] , 2)
self.assertEqual(duplicate_clusters[0][0]["is_extreme"] , a)
| 685 | 1 |
'''simple docstring'''
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class SCREAMING_SNAKE_CASE__ ( lowercase_ , unittest.TestCase ):
_UpperCAmelCase ='''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'''
def _lowerCAmelCase ( self: str , a: Optional[int]=0) ->int:
'''simple docstring'''
a_ = floats_tensor((1, 3, 1_28, 1_28) , rng=random.Random(a))
a_ = np.random.RandomState(a)
a_ = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 3,
"strength": 0.75,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def _lowerCAmelCase ( self: List[str]) ->int:
'''simple docstring'''
a_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider")
pipe.set_progress_bar_config(disable=a)
a_ = self.get_dummy_inputs()
a_ = pipe(**a).images
a_ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 1_28, 1_28, 3)
a_ = np.array([0.6_9643, 0.5_8484, 0.5_0314, 0.5_8760, 0.5_5368, 0.5_9643, 0.5_1529, 0.4_1217, 0.4_9087])
assert np.abs(image_slice - expected_slice).max() < 1e-1
def _lowerCAmelCase ( self: Optional[int]) ->int:
'''simple docstring'''
a_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider")
a_ = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=a)
pipe.set_progress_bar_config(disable=a)
a_ = self.get_dummy_inputs()
a_ = pipe(**a).images
a_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
a_ = np.array([0.6_1737, 0.5_4642, 0.5_3183, 0.5_4465, 0.5_2742, 0.6_0525, 0.4_9969, 0.4_0655, 0.4_8154])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def _lowerCAmelCase ( self: Union[str, Any]) ->Tuple:
'''simple docstring'''
a_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider")
a_ = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=a)
# warmup pass to apply optimizations
a_ = pipe(**self.get_dummy_inputs())
a_ = self.get_dummy_inputs()
a_ = pipe(**a).images
a_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
a_ = np.array([0.5_2761, 0.5_9977, 0.4_9033, 0.4_9619, 0.5_4282, 0.5_0311, 0.4_7600, 0.4_0918, 0.4_5203])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def _lowerCAmelCase ( self: List[Any]) ->str:
'''simple docstring'''
a_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider")
a_ = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=a)
a_ = self.get_dummy_inputs()
a_ = pipe(**a).images
a_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
a_ = np.array([0.5_2911, 0.6_0004, 0.4_9229, 0.4_9805, 0.5_4502, 0.5_0680, 0.4_7777, 0.4_1028, 0.4_5304])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def _lowerCAmelCase ( self: Tuple) ->Optional[int]:
'''simple docstring'''
a_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider")
a_ = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=a)
a_ = self.get_dummy_inputs()
a_ = pipe(**a).images
a_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
a_ = np.array([0.5_2911, 0.6_0004, 0.4_9229, 0.4_9805, 0.5_4502, 0.5_0680, 0.4_7777, 0.4_1028, 0.4_5304])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def _lowerCAmelCase ( self: Union[str, Any]) ->Union[str, Any]:
'''simple docstring'''
a_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider")
a_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=a)
a_ = self.get_dummy_inputs()
a_ = pipe(**a).images
a_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
a_ = np.array([0.6_5331, 0.5_8277, 0.4_8204, 0.5_6059, 0.5_3665, 0.5_6235, 0.5_0969, 0.4_0009, 0.4_6552])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@property
def _lowerCAmelCase ( self: Optional[Any]) ->List[Any]:
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _lowerCAmelCase ( self: str) ->str:
'''simple docstring'''
a_ = ort.SessionOptions()
a_ = False
return options
def _lowerCAmelCase ( self: List[Any]) ->int:
'''simple docstring'''
a_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg")
a_ = init_image.resize((7_68, 5_12))
# using the PNDM scheduler by default
a_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=a , feature_extractor=a , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=a)
a_ = "A fantasy landscape, trending on artstation"
a_ = np.random.RandomState(0)
a_ = pipe(
prompt=a , image=a , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=a , output_type="np" , )
a_ = output.images
a_ = images[0, 2_55:2_58, 3_83:3_86, -1]
assert images.shape == (1, 5_12, 7_68, 3)
a_ = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
def _lowerCAmelCase ( self: Dict) ->Tuple:
'''simple docstring'''
a_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg")
a_ = init_image.resize((7_68, 5_12))
a_ = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx")
a_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=a , safety_checker=a , feature_extractor=a , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=a)
a_ = "A fantasy landscape, trending on artstation"
a_ = np.random.RandomState(0)
a_ = pipe(
prompt=a , image=a , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=a , output_type="np" , )
a_ = output.images
a_ = images[0, 2_55:2_58, 3_83:3_86, -1]
assert images.shape == (1, 5_12, 7_68, 3)
a_ = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 685 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
a_ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
def __init__( self: List[Any] , *a: str , **a: Tuple) ->None:
'''simple docstring'''
warnings.warn(
"The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use DonutImageProcessor instead." , a , )
super().__init__(*a , **a)
| 685 | 1 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
a_ = logging.get_logger(__name__)
a_ = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
a_ = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
a_ = {
'facebook/blenderbot_small-90M': 512,
}
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_UpperCAmelCase =VOCAB_FILES_NAMES
_UpperCAmelCase =PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase =BlenderbotSmallTokenizer
def __init__( self: str , a: Optional[Any]=None , a: Optional[int]=None , a: Tuple="<|endoftext|>" , a: Any="<|endoftext|>" , a: int="<|endoftext|>" , a: List[Any]=False , a: Optional[Any]=True , **a: Dict , ) ->Union[str, Any]:
'''simple docstring'''
super().__init__(
ByteLevelBPETokenizer(
vocab=a , merges=a , add_prefix_space=a , trim_offsets=a , ) , bos_token=a , eos_token=a , unk_token=a , **a , )
a_ = add_prefix_space
def _lowerCAmelCase ( self: Tuple , a: str , a: List[str]=None) ->Dict:
'''simple docstring'''
a_ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _lowerCAmelCase ( self: Dict , a: List[int] , a: Optional[List[int]] = None) ->List[int]:
'''simple docstring'''
a_ = [self.sep_token_id]
a_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
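# Hedged sketch of the two methods above (ids are illustrative; judging by
# their signatures these are the build_inputs_with_special_tokens and
# create_token_type_ids_from_sequences overrides): a single sequence [5, 6]
# becomes [bos, 5, 6, eos]; a pair ([5], [7]) becomes
# [bos, 5, eos, eos, 7, eos]; and token type ids are always zeros, since
# Blenderbot does not use them.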
| 685 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
a_ = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def _lowerCAmelCase ( self: Any , a: Path , a: Union[str, None] = None , a: Union[List[str], None] = None , a: Union[str, List[str], None] = None , a: bool = True , ) ->Optional[Any]:
'''simple docstring'''
a_ = [file for file in os.listdir(a) if os.path.isfile(os.path.join(a , a))]
if identifier is not None:
a_ = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(a , a):
for n_ in n_identifier:
a_ = [file for file in files if n_ not in file]
else:
a_ = [file for file in files if n_identifier not in file]
a_ = ignore_files or []
ignore_files.append("__init__.py")
a_ = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print("Testing" , a)
if only_modules:
a_ = file.split(".")[0]
try:
a_ = getattr(a , a)
a_ = doctest.DocTestSuite(a)
a_ = unittest.TextTestRunner().run(a)
self.assertIs(len(result.failures) , 0)
except AttributeError:
logger.info(f"""{module_identifier} is not a module.""")
else:
a_ = doctest.testfile(str(".." / directory / file) , optionflags=doctest.ELLIPSIS)
self.assertIs(result.failed , 0)
def _lowerCAmelCase ( self: Dict) ->Tuple:
'''simple docstring'''
a_ = Path("src/transformers")
a_ = "modeling"
a_ = [
"modeling_ctrl.py",
"modeling_tf_ctrl.py",
]
self.analyze_directory(a , identifier=a , ignore_files=a)
def _lowerCAmelCase ( self: int) ->Dict:
'''simple docstring'''
a_ = Path("src/transformers")
a_ = "tokenization"
self.analyze_directory(a , identifier=a)
def _lowerCAmelCase ( self: List[Any]) ->Optional[int]:
'''simple docstring'''
a_ = Path("src/transformers")
a_ = "configuration"
self.analyze_directory(a , identifier=a)
def _lowerCAmelCase ( self: Union[str, Any]) ->Any:
'''simple docstring'''
a_ = Path("src/transformers")
a_ = ["configuration", "modeling", "tokenization"]
self.analyze_directory(a , n_identifier=a)
def _lowerCAmelCase ( self: Optional[int]) ->Tuple:
'''simple docstring'''
a_ = Path("docs/source")
a_ = ["favicon.ico"]
self.analyze_directory(a , ignore_files=a , only_modules=a)
| 685 | 1 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
a_ = logging.get_logger(__name__)
# General docstring
a_ = 'RegNetConfig'
# Base docstring
a_ = 'facebook/regnet-y-040'
a_ = [1, 1_088, 7, 7]
# Image classification docstring
a_ = 'facebook/regnet-y-040'
a_ = 'tabby, tabby cat'
a_ = [
'facebook/regnet-y-040',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer ):
def __init__( self: Optional[Any] , a: int , a: int = 3 , a: int = 1 , a: int = 1 , a: Optional[str] = "relu" , **a: Any , ) ->Optional[Any]:
'''simple docstring'''
super().__init__(**a)
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
a_ = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2)
a_ = tf.keras.layers.ConvaD(
filters=a , kernel_size=a , strides=a , padding="VALID" , groups=a , use_bias=a , name="convolution" , )
a_ = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name="normalization")
a_ = ACTaFN[activation] if activation is not None else tf.identity
def _lowerCAmelCase ( self: Optional[Any] , a: Tuple) ->Tuple:
'''simple docstring'''
a_ = self.convolution(self.padding(a))
a_ = self.normalization(a)
a_ = self.activation(a)
return hidden_state
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer ):
def __init__( self: Dict , a: RegNetConfig , **a: List[Any]) ->Union[str, Any]:
'''simple docstring'''
super().__init__(**a)
a_ = config.num_channels
a_ = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="embedder" , )
def _lowerCAmelCase ( self: Optional[Any] , a: Any) ->Optional[int]:
'''simple docstring'''
a_ = shape_list(a)[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration.")
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
a_ = tf.transpose(a , perm=(0, 2, 3, 1))
a_ = self.embedder(a)
return hidden_state
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer ):
def __init__( self: Any , a: int , a: int = 2 , **a: int) ->List[str]:
'''simple docstring'''
super().__init__(**a)
a_ = tf.keras.layers.ConvaD(
filters=a , kernel_size=1 , strides=a , use_bias=a , name="convolution")
a_ = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name="normalization")
def _lowerCAmelCase ( self: Optional[Any] , a: tf.Tensor , a: bool = False) ->tf.Tensor:
'''simple docstring'''
return self.normalization(self.convolution(a) , training=a)
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer ):
def __init__( self: Tuple , a: int , a: int , **a: Any) ->Optional[int]:
'''simple docstring'''
super().__init__(**a)
a_ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=a , name="pooler")
a_ = [
tf.keras.layers.ConvaD(filters=a , kernel_size=1 , activation="relu" , name="attention.0"),
tf.keras.layers.ConvaD(filters=a , kernel_size=1 , activation="sigmoid" , name="attention.2"),
]
def _lowerCAmelCase ( self: List[str] , a: Union[str, Any]) ->Any:
'''simple docstring'''
a_ = self.pooler(a)
for layer_module in self.attention:
a_ = layer_module(a)
a_ = hidden_state * pooled
return hidden_state
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer ):
def __init__( self: Optional[int] , a: RegNetConfig , a: int , a: int , a: int = 1 , **a: Any) ->Tuple:
'''simple docstring'''
super().__init__(**a)
a_ = in_channels != out_channels or stride != 1
a_ = max(1 , out_channels // config.groups_width)
a_ = (
TFRegNetShortCut(a , stride=a , name="shortcut")
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut")
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
a_ = [
TFRegNetConvLayer(a , kernel_size=1 , activation=config.hidden_act , name="layer.0"),
TFRegNetConvLayer(
a , stride=a , groups=a , activation=config.hidden_act , name="layer.1"),
TFRegNetConvLayer(a , kernel_size=1 , activation=a , name="layer.2"),
]
a_ = ACTaFN[config.hidden_act]
def _lowerCAmelCase ( self: List[Any] , a: List[str]) ->Optional[Any]:
'''simple docstring'''
a_ = hidden_state
for layer_module in self.layers:
a_ = layer_module(a)
a_ = self.shortcut(a)
hidden_state += residual
a_ = self.activation(a)
return hidden_state
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer ):
def __init__( self: Optional[Any] , a: RegNetConfig , a: int , a: int , a: int = 1 , **a: List[Any]) ->List[str]:
'''simple docstring'''
super().__init__(**a)
a_ = in_channels != out_channels or stride != 1
a_ = max(1 , out_channels // config.groups_width)
a_ = (
TFRegNetShortCut(a , stride=a , name="shortcut")
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut")
)
a_ = [
TFRegNetConvLayer(a , kernel_size=1 , activation=config.hidden_act , name="layer.0"),
TFRegNetConvLayer(
a , stride=a , groups=a , activation=config.hidden_act , name="layer.1"),
TFRegNetSELayer(a , reduced_channels=int(round(in_channels / 4)) , name="layer.2"),
TFRegNetConvLayer(a , kernel_size=1 , activation=a , name="layer.3"),
]
a_ = ACTaFN[config.hidden_act]
def _lowerCAmelCase ( self: str , a: Any) ->List[str]:
'''simple docstring'''
a_ = hidden_state
for layer_module in self.layers:
a_ = layer_module(a)
a_ = self.shortcut(a)
hidden_state += residual
a_ = self.activation(a)
return hidden_state
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer ):
def __init__( self: Optional[Any] , a: RegNetConfig , a: int , a: int , a: int = 2 , a: int = 2 , **a: Optional[int]) ->List[str]:
'''simple docstring'''
super().__init__(**a)
a_ = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
a_ = [
# downsampling is done in the first layer with stride of 2
layer(a , a , a , stride=a , name="layers.0"),
*[layer(a , a , a , name=f"""layers.{i+1}""") for i in range(depth - 1)],
]
def _lowerCAmelCase ( self: Optional[Any] , a: Optional[int]) ->Optional[int]:
'''simple docstring'''
for layer_module in self.layers:
a_ = layer_module(a)
return hidden_state
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer ):
def __init__( self: Tuple , a: RegNetConfig , **a: Union[str, Any]) ->Optional[Any]:
'''simple docstring'''
super().__init__(**a)
a_ = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
a , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="stages.0" , ))
a_ = zip(config.hidden_sizes , config.hidden_sizes[1:])
for i, ((in_channels, out_channels), depth) in enumerate(zip(a , config.depths[1:])):
self.stages.append(TFRegNetStage(a , a , a , depth=a , name=f"""stages.{i+1}"""))
def _lowerCAmelCase ( self: List[Any] , a: tf.Tensor , a: bool = False , a: bool = True) ->TFBaseModelOutputWithNoAttention:
'''simple docstring'''
a_ = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
a_ = hidden_states + (hidden_state,)
a_ = stage_module(a)
if output_hidden_states:
a_ = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None)
return TFBaseModelOutputWithNoAttention(last_hidden_state=a , hidden_states=a)
@keras_serializable
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer ):
_UpperCAmelCase =RegNetConfig
def __init__( self: Optional[Any] , a: List[str] , **a: Optional[int]) ->str:
'''simple docstring'''
super().__init__(**a)
a_ = config
a_ = TFRegNetEmbeddings(a , name="embedder")
a_ = TFRegNetEncoder(a , name="encoder")
a_ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=a , name="pooler")
@unpack_inputs
def _lowerCAmelCase ( self: Optional[int] , a: tf.Tensor , a: Optional[bool] = None , a: Optional[bool] = None , a: bool = False , ) ->TFBaseModelOutputWithPoolingAndNoAttention:
'''simple docstring'''
a_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
a_ = return_dict if return_dict is not None else self.config.use_return_dict
a_ = self.embedder(a , training=a)
a_ = self.encoder(
a , output_hidden_states=a , return_dict=a , training=a)
a_ = encoder_outputs[0]
a_ = self.pooler(a)
# Change to NCHW output format have uniformity in the modules
a_ = tf.transpose(a , perm=(0, 3, 1, 2))
a_ = tf.transpose(a , perm=(0, 3, 1, 2))
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
a_ = tuple([tf.transpose(a , perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=a , pooler_output=a , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_UpperCAmelCase =RegNetConfig
_UpperCAmelCase ='''regnet'''
_UpperCAmelCase ='''pixel_values'''
@property
def _lowerCAmelCase ( self: Optional[Any]) ->int:
'''simple docstring'''
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_24, 2_24) , dtype=tf.floataa)}
a_ = r'\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n'
a_ = r'\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
'''The bare RegNet model outputting raw features without any specific head on top.''' , lowercase_ , )
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
def __init__( self: Union[str, Any] , a: RegNetConfig , *a: str , **a: Any) ->Any:
'''simple docstring'''
super().__init__(a , *a , **a)
a_ = TFRegNetMainLayer(a , name="regnet")
@unpack_inputs
@add_start_docstrings_to_model_forward(a)
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=a , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _lowerCAmelCase ( self: str , a: tf.Tensor , a: Optional[bool] = None , a: Optional[bool] = None , a: List[str]=False , ) ->Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
'''simple docstring'''
a_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
a_ = return_dict if return_dict is not None else self.config.use_return_dict
a_ = self.regnet(
pixel_values=a , output_hidden_states=a , return_dict=a , training=a , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
'''
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , lowercase_ , )
class SCREAMING_SNAKE_CASE__ ( lowercase_ , lowercase_ ):
def __init__( self: List[Any] , a: RegNetConfig , *a: List[str] , **a: Any) ->List[Any]:
'''simple docstring'''
super().__init__(a , *a , **a)
a_ = config.num_labels
a_ = TFRegNetMainLayer(a , name="regnet")
# classification head
a_ = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name="classifier.1") if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(a)
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=a , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _lowerCAmelCase ( self: Any , a: tf.Tensor = None , a: tf.Tensor = None , a: bool = None , a: bool = None , a: Tuple=False , ) ->Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
'''simple docstring'''
a_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
a_ = return_dict if return_dict is not None else self.config.use_return_dict
a_ = self.regnet(
a , output_hidden_states=a , return_dict=a , training=a)
a_ = outputs.pooler_output if return_dict else outputs[1]
a_ = self.classifier[0](a)
a_ = self.classifier[1](a)
a_ = None if labels is None else self.hf_compute_loss(labels=a , logits=a)
if not return_dict:
a_ = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=a , logits=a , hidden_states=outputs.hidden_states)
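# Hedged usage sketch (assumes transformers with the TF extras installed; the
# checkpoint and class names follow the public API this file appears to port):
#
#     from transformers import AutoImageProcessor, TFRegNetForImageClassification
#     processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#     model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#     inputs = processor(image, return_tensors="tf")
#     logits = model(**inputs).logits   # (batch, num_labels) class scores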
| 685 |
'''simple docstring'''
def solution(n: int = 100) -> int:
    '''Return the difference between the square of the sum and the sum of the
    squares of the first n natural numbers (Project Euler problem 6).'''
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(F'{solution() = }')
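# Worked check: for n = 10 the sum of squares is 385 and the square of the sum
# is 55 ** 2 = 3025, so solution(10) == 2640; the default solution(100)
# evaluates to 25164150.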
| 685 | 1 |
'''simple docstring'''
class OverFlowError(Exception):
    pass
class UnderFlowError(Exception):
    pass
class FixedPriorityQueue:
    def __init__(self) -> None:
        '''Three FIFO queues, one per priority level (0 is the highest).'''
        self.queues: list[list[int]] = [
            [],
            [],
            [],
        ]
    def enqueue(self, priority: int, data: int) -> None:
        '''Add data to the queue of the given priority.'''
        try:
            if len(self.queues[priority]) >= 100:
                raise OverFlowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")
    def dequeue(self) -> int:
        '''Return the first element of the highest-priority non-empty queue.'''
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")
    def __str__(self) -> str:
        '''Render one line per priority level.'''
        return "\n".join(f"""Priority {i}: {q}""" for i, q in enumerate(self.queues))
class ElementPriorityQueue:
    def __init__(self) -> None:
        '''A single queue; the smallest element has the highest priority.'''
        self.queue: list[int] = []
    def enqueue(self, data: int) -> None:
        '''Add data to the queue.'''
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)
    def dequeue(self) -> int:
        '''Remove and return the smallest element in the queue.'''
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        data = min(self.queue)
        self.queue.remove(data)
        return data
    def __str__(self) -> str:
        '''Render the queue contents.'''
        return str(self.queue)
def fixed_priority_queue() -> None:
    '''Demonstrate the FixedPriorityQueue.'''
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
def element_priority_queue() -> None:
    '''Demonstrate the ElementPriorityQueue.'''
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 685 |
'''simple docstring'''
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_UpperCAmelCase =(PNDMScheduler,)
_UpperCAmelCase =(('''num_inference_steps''', 50),)
def _lowerCAmelCase ( self: int , **a: Optional[int]) ->Any:
'''simple docstring'''
a_ = {
"num_train_timesteps": 10_00,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**a)
return config
def _lowerCAmelCase ( self: Any , a: Tuple=0 , **a: Any) ->Any:
'''simple docstring'''
a_ = dict(self.forward_default_kwargs)
a_ = kwargs.pop("num_inference_steps" , a)
a_ = self.dummy_sample
a_ = 0.1 * sample
a_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
a_ = self.get_scheduler_config(**a)
a_ = scheduler_class(**a)
scheduler.set_timesteps(a)
# copy over dummy past residuals
a_ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a)
a_ = scheduler_class.from_pretrained(a)
new_scheduler.set_timesteps(a)
# copy over dummy past residuals
a_ = dummy_past_residuals[:]
a_ = scheduler.step_prk(a , a , a , **a).prev_sample
a_ = new_scheduler.step_prk(a , a , a , **a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
a_ = scheduler.step_plms(a , a , a , **a).prev_sample
a_ = new_scheduler.step_plms(a , a , a , **a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def _lowerCAmelCase ( self: str) ->Any:
'''simple docstring'''
pass
def _lowerCAmelCase ( self: Union[str, Any] , a: str=0 , **a: Union[str, Any]) ->Tuple:
'''simple docstring'''
a_ = dict(self.forward_default_kwargs)
a_ = kwargs.pop("num_inference_steps" , a)
a_ = self.dummy_sample
a_ = 0.1 * sample
a_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
a_ = self.get_scheduler_config()
a_ = scheduler_class(**a)
scheduler.set_timesteps(a)
# copy over dummy past residuals (must be after setting timesteps)
a_ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a)
a_ = scheduler_class.from_pretrained(a)
# copy over dummy past residuals
new_scheduler.set_timesteps(a)
# copy over dummy past residual (must be after setting timesteps)
a_ = dummy_past_residuals[:]
a_ = scheduler.step_prk(a , a , a , **a).prev_sample
a_ = new_scheduler.step_prk(a , a , a , **a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
a_ = scheduler.step_plms(a , a , a , **a).prev_sample
a_ = new_scheduler.step_plms(a , a , a , **a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def _lowerCAmelCase ( self: Dict , **a: int) ->Any:
'''simple docstring'''
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config(**a)
a_ = scheduler_class(**a)
a_ = 10
a_ = self.dummy_model()
a_ = self.dummy_sample_deter
scheduler.set_timesteps(a)
for i, t in enumerate(scheduler.prk_timesteps):
a_ = model(a , a)
a_ = scheduler.step_prk(a , a , a).prev_sample
for i, t in enumerate(scheduler.plms_timesteps):
a_ = model(a , a)
a_ = scheduler.step_plms(a , a , a).prev_sample
return sample
def _lowerCAmelCase ( self: int) ->int:
'''simple docstring'''
a_ = dict(self.forward_default_kwargs)
a_ = kwargs.pop("num_inference_steps" , a)
for scheduler_class in self.scheduler_classes:
a_ = self.get_scheduler_config()
a_ = scheduler_class(**a)
a_ = self.dummy_sample
a_ = 0.1 * sample
if num_inference_steps is not None and hasattr(a , "set_timesteps"):
scheduler.set_timesteps(a)
elif num_inference_steps is not None and not hasattr(a , "set_timesteps"):
a_ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
a_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
a_ = dummy_past_residuals[:]
a_ = scheduler.step_prk(a , 0 , a , **a).prev_sample
a_ = scheduler.step_prk(a , 1 , a , **a).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
a_ = scheduler.step_plms(a , 0 , a , **a).prev_sample
a_ = scheduler.step_plms(a , 1 , a , **a).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
def _lowerCAmelCase ( self: Dict) ->List[Any]:
'''simple docstring'''
for timesteps in [1_00, 10_00]:
self.check_over_configs(num_train_timesteps=a)
def _lowerCAmelCase ( self: Optional[int]) ->List[Any]:
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=a)
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config(steps_offset=1)
a_ = scheduler_class(**a)
scheduler.set_timesteps(10)
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[9_01, 8_51, 8_51, 8_01, 8_01, 7_51, 7_51, 7_01, 7_01, 6_51, 6_51, 6_01, 6_01, 5_01, 4_01, 3_01, 2_01, 1_01, 1]) , )
def _lowerCAmelCase ( self: Tuple) ->Optional[Any]:
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02]):
self.check_over_configs(beta_start=a , beta_end=a)
def _lowerCAmelCase ( self: int) ->Tuple:
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=a)
def _lowerCAmelCase ( self: Optional[int]) ->List[Any]:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=a)
def _lowerCAmelCase ( self: Tuple) ->Optional[Any]:
'''simple docstring'''
for t in [1, 5, 10]:
self.check_over_forward(time_step=a)
def _lowerCAmelCase ( self: str) ->List[str]:
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00]):
self.check_over_forward(num_inference_steps=a)
def _lowerCAmelCase ( self: Dict) ->Union[str, Any]:
'''simple docstring'''
a_ = 27
for scheduler_class in self.scheduler_classes:
a_ = self.dummy_sample
a_ = 0.1 * sample
a_ = self.get_scheduler_config()
a_ = scheduler_class(**a)
scheduler.set_timesteps(a)
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2]):
a_ = scheduler.step_prk(a , a , a).prev_sample
def _lowerCAmelCase ( self: Optional[Any]) ->Dict:
'''simple docstring'''
with self.assertRaises(a):
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config()
a_ = scheduler_class(**a)
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample).prev_sample
def _lowerCAmelCase ( self: Optional[int]) ->Union[str, Any]:
'''simple docstring'''
a_ = self.full_loop()
a_ = torch.sum(torch.abs(a))
a_ = torch.mean(torch.abs(a))
assert abs(result_sum.item() - 198.1318) < 1e-2
assert abs(result_mean.item() - 0.2580) < 1e-3
def _lowerCAmelCase ( self: Optional[int]) ->int:
'''simple docstring'''
a_ = self.full_loop(prediction_type="v_prediction")
a_ = torch.sum(torch.abs(a))
a_ = torch.mean(torch.abs(a))
assert abs(result_sum.item() - 67.3986) < 1e-2
assert abs(result_mean.item() - 0.0878) < 1e-3
def _lowerCAmelCase ( self: int) ->Optional[Any]:
'''simple docstring'''
a_ = self.full_loop(set_alpha_to_one=a , beta_start=0.01)
a_ = torch.sum(torch.abs(a))
a_ = torch.mean(torch.abs(a))
assert abs(result_sum.item() - 230.0399) < 1e-2
assert abs(result_mean.item() - 0.2995) < 1e-3
def _lowerCAmelCase ( self: List[str]) ->Any:
'''simple docstring'''
a_ = self.full_loop(set_alpha_to_one=a , beta_start=0.01)
a_ = torch.sum(torch.abs(a))
a_ = torch.mean(torch.abs(a))
assert abs(result_sum.item() - 186.9482) < 1e-2
assert abs(result_mean.item() - 0.2434) < 1e-3
| 685 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ = {
'configuration_trajectory_transformer': [
'TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TrajectoryTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrajectoryTransformerModel',
'TrajectoryTransformerPreTrainedModel',
'load_tf_weights_in_trajectory_transformer',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 685 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def _lowerCAmelCase ( self: Optional[int]) ->Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
def _lowerCAmelCase ( self: str) ->Optional[int]:
'''simple docstring'''
a_ , a_ = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-canny" , from_pt=a , dtype=jnp.bfloataa)
a_ , a_ = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , controlnet=a , from_pt=a , dtype=jnp.bfloataa)
a_ = controlnet_params
a_ = "bird"
a_ = jax.device_count()
a_ = pipe.prepare_text_inputs([prompts] * num_samples)
a_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png")
a_ = pipe.prepare_image_inputs([canny_image] * num_samples)
a_ = jax.random.PRNGKey(0)
a_ = jax.random.split(a , jax.device_count())
a_ = replicate(a)
a_ = shard(a)
a_ = shard(a)
a_ = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=50 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
a_ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
a_ = images[0, 2_53:2_56, 2_53:2_56, -1]
a_ = jnp.asarray(jax.device_get(image_slice.flatten()))
a_ = jnp.array(
[0.16_7969, 0.11_6699, 0.08_1543, 0.15_4297, 0.13_2812, 0.10_8887, 0.16_9922, 0.16_9922, 0.20_5078])
print(f"""output_slice: {output_slice}""")
assert jnp.abs(output_slice - expected_slice).max() < 1e-2
def _lowerCAmelCase ( self: Union[str, Any]) ->str:
'''simple docstring'''
a_ , a_ = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-openpose" , from_pt=a , dtype=jnp.bfloataa)
a_ , a_ = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , controlnet=a , from_pt=a , dtype=jnp.bfloataa)
a_ = controlnet_params
a_ = "Chef in the kitchen"
a_ = jax.device_count()
a_ = pipe.prepare_text_inputs([prompts] * num_samples)
a_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png")
a_ = pipe.prepare_image_inputs([pose_image] * num_samples)
a_ = jax.random.PRNGKey(0)
a_ = jax.random.split(a , jax.device_count())
a_ = replicate(a)
a_ = shard(a)
a_ = shard(a)
a_ = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=50 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
a_ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
a_ = images[0, 2_53:2_56, 2_53:2_56, -1]
a_ = jnp.asarray(jax.device_get(image_slice.flatten()))
a_ = jnp.array(
[[0.27_1484, 0.26_1719, 0.27_5391, 0.27_7344, 0.27_9297, 0.29_1016, 0.29_4922, 0.30_2734, 0.30_2734]])
print(f"""output_slice: {output_slice}""")
assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 685 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import _LazyModule
a_ = {'tokenization_tapex': ['TapexTokenizer']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 685 |
'''simple docstring'''
def __UpperCAmelCase (lowercase__ = 1000 ) -> int:
'''simple docstring'''
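    # Sum all natural numbers below n that are multiples of 3 or 5; e.g. solution(10) == 23 (3 + 5 + 6 + 9).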
return sum(e for e in range(3 ,lowercase__ ) if e % 3 == 0 or e % 5 == 0 )
if __name__ == "__main__":
print(F'{solution() = }')
| 685 | 1 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all BART models at https://huggingface.co/models?filter=bart
a_ = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
}
a_ = {
'facebook/bart-base': 1_024,
'facebook/bart-large': 1_024,
'facebook/bart-large-mnli': 1_024,
'facebook/bart-large-cnn': 1_024,
'facebook/bart-large-xsum': 1_024,
'yjernite/bart_eli5': 1_024,
}
@lru_cache()
def __UpperCAmelCase () -> Optional[int]:
'''simple docstring'''
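    # Visible/printable bytes keep their own character; every remaining byte is remapped above
    # code point 255, so BPE strings never contain whitespace or control characters.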
a_ = (
list(range(ord("!" ) ,ord("~" ) + 1 ) ) + list(range(ord("¡" ) ,ord("¬" ) + 1 ) ) + list(range(ord("®" ) ,ord("ÿ" ) + 1 ) )
)
a_ = bs[:]
a_ = 0
for b in range(2**8 ):
if b not in bs:
bs.append(lowercase__ )
cs.append(2**8 + n )
n += 1
a_ = [chr(lowercase__ ) for n in cs]
return dict(zip(lowercase__ ,lowercase__ ) )
def __UpperCAmelCase (lowercase__ ) -> Any:
'''simple docstring'''
a_ = set()
a_ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
a_ = char
return pairs
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_UpperCAmelCase =VOCAB_FILES_NAMES
_UpperCAmelCase =PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase =['''input_ids''', '''attention_mask''']
def __init__( self: Optional[Any] , a: int , a: Optional[Any] , a: List[Any]="replace" , a: Union[str, Any]="<s>" , a: Optional[int]="</s>" , a: Optional[Any]="</s>" , a: int="<s>" , a: Tuple="<unk>" , a: int="<pad>" , a: Any="<mask>" , a: Union[str, Any]=False , **a: Any , ) ->Dict:
'''simple docstring'''
a_ = AddedToken(a , lstrip=a , rstrip=a) if isinstance(a , a) else bos_token
a_ = AddedToken(a , lstrip=a , rstrip=a) if isinstance(a , a) else eos_token
a_ = AddedToken(a , lstrip=a , rstrip=a) if isinstance(a , a) else sep_token
a_ = AddedToken(a , lstrip=a , rstrip=a) if isinstance(a , a) else cls_token
a_ = AddedToken(a , lstrip=a , rstrip=a) if isinstance(a , a) else unk_token
a_ = AddedToken(a , lstrip=a , rstrip=a) if isinstance(a , a) else pad_token
        # Mask token behaves like a normal word, i.e. includes the space before it
a_ = AddedToken(a , lstrip=a , rstrip=a) if isinstance(a , a) else mask_token
super().__init__(
errors=a , bos_token=a , eos_token=a , unk_token=a , sep_token=a , cls_token=a , pad_token=a , mask_token=a , add_prefix_space=a , **a , )
with open(a , encoding="utf-8") as vocab_handle:
a_ = json.load(a)
a_ = {v: k for k, v in self.encoder.items()}
a_ = errors # how to handle errors in decoding
a_ = bytes_to_unicode()
a_ = {v: k for k, v in self.byte_encoder.items()}
with open(a , encoding="utf-8") as merges_handle:
a_ = merges_handle.read().split("\n")[1:-1]
a_ = [tuple(merge.split()) for merge in bpe_merges]
a_ = dict(zip(a , range(len(a))))
a_ = {}
a_ = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
a_ = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
@property
def _lowerCAmelCase ( self: Dict) ->Optional[Any]:
'''simple docstring'''
return len(self.encoder)
def _lowerCAmelCase ( self: Optional[int]) ->List[Any]:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder)
def _lowerCAmelCase ( self: Optional[int] , a: str) ->Union[str, Any]:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
a_ = tuple(a)
a_ = get_pairs(a)
if not pairs:
return token
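        # Repeatedly merge the adjacent pair with the lowest merge rank until no learned merge applies.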
while True:
a_ = min(a , key=lambda a: self.bpe_ranks.get(a , float("inf")))
if bigram not in self.bpe_ranks:
break
a_ , a_ = bigram
a_ = []
a_ = 0
while i < len(a):
try:
a_ = word.index(a , a)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
a_ = j
if word[i] == first and i < len(a) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
a_ = tuple(a)
a_ = new_word
if len(a) == 1:
break
else:
a_ = get_pairs(a)
a_ = " ".join(a)
a_ = word
return word
def _lowerCAmelCase ( self: Optional[Any] , a: Dict) ->Optional[Any]:
'''simple docstring'''
a_ = []
for token in re.findall(self.pat , a):
a_ = "".join(
self.byte_encoder[b] for b in token.encode("utf-8")) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(a).split(" "))
return bpe_tokens
def _lowerCAmelCase ( self: Optional[int] , a: Optional[Any]) ->Optional[Any]:
'''simple docstring'''
return self.encoder.get(a , self.encoder.get(self.unk_token))
def _lowerCAmelCase ( self: Dict , a: Tuple) ->Any:
'''simple docstring'''
return self.decoder.get(a)
def _lowerCAmelCase ( self: Union[str, Any] , a: List[Any]) ->Union[str, Any]:
'''simple docstring'''
a_ = "".join(a)
a_ = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8" , errors=self.errors)
return text
def _lowerCAmelCase ( self: Optional[Any] , a: str , a: Optional[str] = None) ->Tuple[str]:
'''simple docstring'''
if not os.path.isdir(a):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
return
a_ = os.path.join(
a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
a_ = os.path.join(
a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
with open(a , "w" , encoding="utf-8") as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=a , ensure_ascii=a) + "\n")
a_ = 0
with open(a , "w" , encoding="utf-8") as writer:
writer.write("#version: 0.2\n")
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda a: kv[1]):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
" Please check that the tokenizer is not corrupted!")
a_ = token_index
writer.write(" ".join(a) + "\n")
index += 1
return vocab_file, merge_file
def _lowerCAmelCase ( self: List[str] , a: List[int] , a: Optional[List[int]] = None) ->List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
a_ = [self.cls_token_id]
a_ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _lowerCAmelCase ( self: str , a: List[int] , a: Optional[List[int]] = None , a: bool = False) ->List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a , token_ids_a=a , already_has_special_tokens=a)
if token_ids_a is None:
return [1] + ([0] * len(a)) + [1]
return [1] + ([0] * len(a)) + [1, 1] + ([0] * len(a)) + [1]
def _lowerCAmelCase ( self: List[str] , a: List[int] , a: Optional[List[int]] = None) ->List[int]:
'''simple docstring'''
a_ = [self.sep_token_id]
a_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def _lowerCAmelCase ( self: Dict , a: str , a: str=False , **a: Union[str, Any]) ->Optional[Any]:
'''simple docstring'''
a_ = kwargs.pop("add_prefix_space" , self.add_prefix_space)
if (is_split_into_words or add_prefix_space) and (len(a) > 0 and not text[0].isspace()):
a_ = " " + text
return (text, kwargs)
| 685 |
'''simple docstring'''
import math
def __UpperCAmelCase (lowercase__ ) -> list:
'''simple docstring'''
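    # Sieve of Eratosthenes: for each odd candidate i, cross out its multiples starting at 2*i;
    # 2 and the surviving odd numbers are then collected as primes.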
a_ = [True] * n
a_ = False
a_ = False
a_ = True
for i in range(3 ,int(n**0.5 + 1 ) ,2 ):
a_ = i * 2
while index < n:
a_ = False
a_ = index + i
a_ = [2]
for i in range(3 ,lowercase__ ,2 ):
if is_prime[i]:
primes.append(lowercase__ )
return primes
def __UpperCAmelCase (lowercase__ = 999966663333 ) -> int:
'''simple docstring'''
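    # Project Euler 234: n is semidivisible when exactly one of lps(n) (largest prime <= sqrt(n))
    # and ups(n) (smallest prime >= sqrt(n)) divides it; this sums all such n up to the limit.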
a_ = math.floor(math.sqrt(lowercase__ ) ) + 100
a_ = prime_sieve(lowercase__ )
a_ = 0
a_ = 0
a_ = primes[prime_index]
while (last_prime**2) <= limit:
a_ = primes[prime_index + 1]
a_ = last_prime**2
a_ = next_prime**2
# Get numbers divisible by lps(current)
a_ = lower_bound + last_prime
while upper_bound > current <= limit:
matches_sum += current
current += last_prime
# Reset the upper_bound
while (upper_bound - next_prime) > limit:
upper_bound -= next_prime
# Add the numbers divisible by ups(current)
a_ = upper_bound - next_prime
while current > lower_bound:
matches_sum += current
current -= next_prime
# Remove the numbers divisible by both ups and lps
a_ = 0
while upper_bound > current <= limit:
if current <= lower_bound:
# Increment the current number
current += last_prime * next_prime
continue
if current > limit:
break
# Remove twice since it was added by both ups and lps
matches_sum -= current * 2
# Increment the current number
current += last_prime * next_prime
# Setup for next pair
a_ = next_prime
prime_index += 1
return matches_sum
if __name__ == "__main__":
print(solution())
| 685 | 1 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
a_ = {
'configuration_cpmant': ['CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CpmAntConfig'],
'tokenization_cpmant': ['CpmAntTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST',
'CpmAntForCausalLM',
'CpmAntModel',
'CpmAntPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 685 |
'''simple docstring'''
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ) -> Any:
'''simple docstring'''
a_ = UniSpeechSatForSequenceClassification.from_pretrained(lowercase__ ,config=lowercase__ )
a_ = downstream_dict["projector.weight"]
a_ = downstream_dict["projector.bias"]
a_ = downstream_dict["model.post_net.linear.weight"]
a_ = downstream_dict["model.post_net.linear.bias"]
return model
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ) -> Dict:
'''simple docstring'''
a_ = UniSpeechSatForAudioFrameClassification.from_pretrained(lowercase__ ,config=lowercase__ )
a_ = downstream_dict["model.linear.weight"]
a_ = downstream_dict["model.linear.bias"]
return model
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ) -> Optional[Any]:
'''simple docstring'''
a_ = UniSpeechSatForXVector.from_pretrained(lowercase__ ,config=lowercase__ )
a_ = downstream_dict["connector.weight"]
a_ = downstream_dict["connector.bias"]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
a_ = downstream_dict[
F"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
]
a_ = downstream_dict[F"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]
a_ = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
a_ = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
a_ = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
a_ = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
a_ = downstream_dict["objective.W"]
return model
@torch.no_grad()
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ) -> List[str]:
'''simple docstring'''
a_ = torch.load(lowercase__ ,map_location="cpu" )
a_ = checkpoint["Downstream"]
a_ = UniSpeechSatConfig.from_pretrained(lowercase__ )
a_ = WavaVecaFeatureExtractor.from_pretrained(
lowercase__ ,return_attention_mask=lowercase__ ,do_normalize=lowercase__ )
a_ = hf_config.architectures[0]
if arch.endswith("ForSequenceClassification" ):
a_ = convert_classification(lowercase__ ,lowercase__ ,lowercase__ )
elif arch.endswith("ForAudioFrameClassification" ):
a_ = convert_diarization(lowercase__ ,lowercase__ ,lowercase__ )
elif arch.endswith("ForXVector" ):
a_ = convert_xvector(lowercase__ ,lowercase__ ,lowercase__ )
else:
raise NotImplementedError(F"""S3PRL weights conversion is not supported for {arch}""" )
if hf_config.use_weighted_layer_sum:
a_ = checkpoint["Featurizer"]["weights"]
hf_feature_extractor.save_pretrained(lowercase__ )
hf_model.save_pretrained(lowercase__ )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
a_ = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 685 | 1 |
'''simple docstring'''
import qiskit
def __UpperCAmelCase (lowercase__ = 2 ) -> qiskit.result.counts.Counts:
'''simple docstring'''
a_ = qubits
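    # Build an n-qubit GHZ state: a Hadamard on qubit 0 followed by a CNOT chain entangles every qubit.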
# Using Aer's simulator
a_ = qiskit.Aer.get_backend("aer_simulator" )
# Creating a Quantum Circuit acting on the q register
a_ = qiskit.QuantumCircuit(lowercase__ ,lowercase__ )
# Adding a H gate on qubit 0 (now q0 in superposition)
circuit.h(0 )
for i in range(1 ,lowercase__ ):
# Adding CX (CNOT) gate
circuit.cx(i - 1 ,lowercase__ )
# Mapping the quantum measurement to the classical bits
circuit.measure(list(range(lowercase__ ) ) ,list(range(lowercase__ ) ) )
    # Measuring any one qubit now collapses the superposition, so every other
    # qubit reads the same state as the measured one.
# Executing the circuit on the simulator
a_ = qiskit.execute(lowercase__ ,lowercase__ ,shots=1000 )
return job.result().get_counts(lowercase__ )
if __name__ == "__main__":
print(F'Total count for various states are: {quantum_entanglement(3)}')
| 685 |
'''simple docstring'''
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
    from .unet_ad import UNetaDModel
    from .unet_ad_condition import UNetaDConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL
| 685 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
a_ = logging.get_logger(__name__)
a_ = {'vocab_file': 'vocab.txt'}
a_ = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
a_ = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
a_ = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_UpperCAmelCase =VOCAB_FILES_NAMES
_UpperCAmelCase =PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase =PRETRAINED_INIT_CONFIGURATION
_UpperCAmelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase =ConvBertTokenizer
def __init__( self: Union[str, Any] , a: Union[str, Any]=None , a: List[str]=None , a: List[Any]=True , a: Optional[int]="[UNK]" , a: Tuple="[SEP]" , a: Any="[PAD]" , a: List[Any]="[CLS]" , a: int="[MASK]" , a: Tuple=True , a: Any=None , **a: Optional[Any] , ) ->Tuple:
'''simple docstring'''
super().__init__(
a , tokenizer_file=a , do_lower_case=a , unk_token=a , sep_token=a , pad_token=a , cls_token=a , mask_token=a , tokenize_chinese_chars=a , strip_accents=a , **a , )
a_ = json.loads(self.backend_tokenizer.normalizer.__getstate__())
if (
normalizer_state.get("lowercase" , a) != do_lower_case
or normalizer_state.get("strip_accents" , a) != strip_accents
or normalizer_state.get("handle_chinese_chars" , a) != tokenize_chinese_chars
):
a_ = getattr(a , normalizer_state.pop("type"))
a_ = do_lower_case
a_ = strip_accents
a_ = tokenize_chinese_chars
a_ = normalizer_class(**a)
a_ = do_lower_case
def _lowerCAmelCase ( self: str , a: Optional[Any] , a: str=None) ->Any:
'''simple docstring'''
a_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _lowerCAmelCase ( self: Union[str, Any] , a: List[int] , a: Optional[List[int]] = None) ->List[int]:
'''simple docstring'''
a_ = [self.sep_token_id]
a_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def _lowerCAmelCase ( self: Union[str, Any] , a: str , a: Optional[str] = None) ->Tuple[str]:
'''simple docstring'''
a_ = self._tokenizer.model.save(a , name=a)
return tuple(a)
| 685 |
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
a_ = logging.get_logger(__name__)
a_ = {'vocab_file': 'vocab.txt', 'emoji_file': 'emoji.json'}
a_ = {
'vocab_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt',
},
'emoji_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json',
},
}
a_ = {
'abeja/gpt-neox-japanese-2.7b': 2_048,
}
def __UpperCAmelCase (lowercase__ ,lowercase__ ) -> Tuple:
'''simple docstring'''
with open(lowercase__ ,"r" ,encoding="utf-8" ) as f:
a_ = json.loads(f.read() )
a_ = collections.OrderedDict()
a_ = collections.OrderedDict()
a_ = collections.OrderedDict()
with open(lowercase__ ,"r" ,encoding="utf-8" ) as f:
a_ = f.readlines()
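        # A vocab line may list several comma-separated surface forms; they all share the same token id.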
a_ = [[t.rstrip("\n" )] if (t == "," or "," not in t) else t.rstrip("\n" ).split("," ) for t in token]
for idx, b in enumerate(lowercase__ ):
a_ = b
a_ = idx
for wd in b:
a_ = idx
return vocab, raw_vocab, ids_to_tokens, emoji
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_UpperCAmelCase =VOCAB_FILES_NAMES
_UpperCAmelCase =PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase =['''input_ids''', '''attention_mask''']
def __init__( self: List[str] , a: Union[str, Any] , a: Optional[int] , a: List[str]="<|endoftext|>" , a: Union[str, Any]="<|endoftext|>" , a: Dict="<|startoftext|>" , a: Dict="<|endoftext|>" , a: Union[str, Any]=False , **a: Optional[int] , ) ->str:
'''simple docstring'''
super().__init__(
unk_token=a , pad_token=a , bos_token=a , eos_token=a , do_clean_text=a , **a , )
if not os.path.isfile(a):
raise ValueError(
f"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
" model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
if not os.path.isfile(a):
raise ValueError(
f"""Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
" pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
a_ = do_clean_text
a_ , a_ , a_ , a_ = load_vocab_and_emoji(a , a)
a_ = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji)
@property
def _lowerCAmelCase ( self: Optional[Any]) ->Optional[Any]:
'''simple docstring'''
return len(self.raw_vocab)
def _lowerCAmelCase ( self: Dict) ->Any:
'''simple docstring'''
return dict(self.raw_vocab , **self.added_tokens_encoder)
def _lowerCAmelCase ( self: Union[str, Any] , a: Any) ->Dict:
'''simple docstring'''
return self.subword_tokenizer.tokenize(a , clean=self.do_clean_text)
def _lowerCAmelCase ( self: int , a: List[Any]) ->Union[str, Any]:
'''simple docstring'''
return self.vocab.get(a , self.vocab.get(self.unk_token))
def _lowerCAmelCase ( self: Optional[Any] , a: Optional[int]) ->str:
'''simple docstring'''
return self.subword_tokenizer.convert_id_to_token(a)
def _lowerCAmelCase ( self: Optional[int] , a: Any) ->str:
'''simple docstring'''
a_ = "".join(a).strip()
return out_string
def _lowerCAmelCase ( self: Any , a: "Conversation") ->List[int]:
'''simple docstring'''
a_ = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(a , add_special_tokens=a) + [self.eos_token_id])
if len(a) > self.model_max_length:
a_ = input_ids[-self.model_max_length :]
return input_ids
def _lowerCAmelCase ( self: int , a: str , a: Optional[str] = None) ->Tuple[str]:
'''simple docstring'''
a_ = 0
if os.path.isdir(a):
a_ = os.path.join(
a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
a_ = os.path.join(
a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"])
else:
a_ = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
)
a_ = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
)
with open(a , "w" , encoding="utf-8") as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
" Please check that the vocabulary is not corrupted!")
a_ = token_index
writer.write(",".join(a) + "\n")
index += 1
with open(a , "w" , encoding="utf-8") as writer:
json.dump(self.emoji , a)
return vocab_file, emoji_file
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
def __init__( self: List[str] , a: Any , a: Union[str, Any] , a: Any) ->List[Any]:
'''simple docstring'''
a_ = vocab # same as swe
a_ = ids_to_tokens # same as bpe
a_ = emoji
a_ = np.max([len(a) for w in self.vocab.keys()])
a_ = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
a_ = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
a_ = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
a_ = re.compile(
r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
a_ = re.compile(
r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
a_ = re.compile(
r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*")
a_ = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
a_ = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
a_ = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})
def __len__( self: Dict) ->Any:
'''simple docstring'''
return len(self.ids_to_tokens)
def _lowerCAmelCase ( self: Union[str, Any] , a: Tuple) ->Any:
'''simple docstring'''
a_ = self.content_repattera.sub("<URL>" , a)
a_ = self.content_repattera.sub("<EMAIL>" , a)
a_ = self.content_repattera.sub("<TEL>" , a)
a_ = self.content_repattera.sub("<DATE>" , a)
a_ = self.content_repattera.sub("<DATE>" , a)
a_ = self.content_repattera.sub("<PRICE>" , a)
a_ = content.translate(self.content_transa)
while "<BLOCK><BLOCK>" in content:
a_ = content.replace("<BLOCK><BLOCK>" , "<BLOCK>")
return content
def _lowerCAmelCase ( self: Any , a: int , a: Optional[int]=False) ->List[str]:
'''simple docstring'''
a_ = text.replace(" " , "<SP>")
a_ = text.replace(" " , "<SP>")
a_ = text.replace("\r\n" , "<BR>")
a_ = text.replace("\n" , "<BR>")
a_ = text.replace("\r" , "<BR>")
a_ = text.replace("\t" , "<TAB>")
a_ = text.replace("—" , "ー")
a_ = text.replace("−" , "ー")
for k, v in self.emoji["emoji"].items():
if k in text:
a_ = text.replace(a , a)
if clean:
a_ = self.clean_text(a)
def check_simbol(a: Dict):
a_ = x.encode()
            if len(x) == 1 and len(e) == 2:
a_ = (int(e[0]) << 8) + int(e[1])
if (
(c >= 0XC_2_A_1 and c <= 0XC_2_B_F)
or (c >= 0XC_7_8_0 and c <= 0XC_7_8_3)
or (c >= 0XC_A_B_9 and c <= 0XC_B_B_F)
or (c >= 0XC_C_8_0 and c <= 0XC_D_A_2)
):
return True
return False
def checkuae(a: str):
a_ = x.encode()
            if len(x) == 1 and len(e) == 3:
a_ = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
if c >= 0XE_2_8_0_8_0 and c <= 0XE_2_B_0_7_F:
return True
return False
a_ = 0
a_ = []
while pos < len(a):
a_ = min(len(a) , pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
a_ = [] # (token_id, token, pos)
for e in range(a , a , -1):
a_ = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(a) > 2:
a_ = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e))
if len(a) > 0:
# the smallest token_id is adopted
a_ , a_ , a_ = sorted(a , key=lambda a: x[0])[0]
result.append(a)
a_ = e
else:
a_ = pos + 1
a_ = text[pos:end]
if check_simbol(a):
result.append("<KIGOU>")
elif checkuae(a):
result.append("<U2000U2BFF>")
else:
for i in wd.encode("utf-8"):
result.append("<|byte%d|>" % i)
a_ = end
return result
def _lowerCAmelCase ( self: int , a: List[Any] , a: Any="\n") ->str:
'''simple docstring'''
a_ = []
a_ = []
a_ = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2]))
else:
if len(a) > 0:
words.append(bytearray(a).decode("utf-8" , errors="replace"))
a_ = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["emoji_inv"][word])
elif word == "<SP>":
words.append(" ")
elif word == "<BR>":
words.append(a)
elif word == "<TAB>":
words.append("\t")
elif word == "<BLOCK>":
words.append("▀")
elif word == "<KIGOU>":
words.append("ǀ")
elif word == "<U2000U2BFF>":
words.append("‖")
else:
words.append(a)
if len(a) > 0:
words.append(bytearray(a).decode("utf-8" , errors="replace"))
a_ = "".join(a)
return text
| 685 | 1 |
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
a_ = logging.get_logger(__name__)
a_ = TypeVar('DatasetType', Dataset, IterableDataset)
def __UpperCAmelCase (lowercase__ ,lowercase__ = None ,lowercase__ = None ,lowercase__ = None ,lowercase__ = None ,lowercase__ = "first_exhausted" ,) -> DatasetType:
'''simple docstring'''
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
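    # Validate inputs: every element must be a Dataset or an IterableDataset, and all elements the same type.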
if not datasets:
raise ValueError("Unable to interleave an empty list of datasets." )
for i, dataset in enumerate(lowercase__ ):
if not isinstance(lowercase__ ,(Dataset, IterableDataset) ):
if isinstance(lowercase__ ,(DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
"is an empty dataset dictionary." )
raise ValueError(
F"""Dataset at position {i} has at least one split: {list(lowercase__ )}\n"""
F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(lowercase__ ) )}']""" )
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(lowercase__ ).__name__}.""" )
if i == 0:
a_ , a_ = (
(Dataset, IterableDataset) if isinstance(lowercase__ ,lowercase__ ) else (IterableDataset, Dataset)
)
elif not isinstance(lowercase__ ,lowercase__ ):
raise ValueError(
F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(F"""{stopping_strategy} is not supported. Please enter a valid stopping_strategy.""" )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
lowercase__ ,lowercase__ ,lowercase__ ,info=lowercase__ ,split=lowercase__ ,stopping_strategy=lowercase__ )
else:
return _interleave_iterable_datasets(
lowercase__ ,lowercase__ ,lowercase__ ,info=lowercase__ ,split=lowercase__ ,stopping_strategy=lowercase__ )
def __UpperCAmelCase (lowercase__ ,lowercase__ = None ,lowercase__ = None ,lowercase__ = 0 ,) -> DatasetType:
'''simple docstring'''
if not dsets:
raise ValueError("Unable to concatenate an empty list of datasets." )
for i, dataset in enumerate(lowercase__ ):
if not isinstance(lowercase__ ,(Dataset, IterableDataset) ):
if isinstance(lowercase__ ,(DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
"is an empty dataset dictionary." )
raise ValueError(
F"""Dataset at position {i} has at least one split: {list(lowercase__ )}\n"""
F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(lowercase__ ) )}']""" )
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(lowercase__ ).__name__}.""" )
if i == 0:
a_ , a_ = (
(Dataset, IterableDataset) if isinstance(lowercase__ ,lowercase__ ) else (IterableDataset, Dataset)
)
elif not isinstance(lowercase__ ,lowercase__ ):
raise ValueError(
F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(lowercase__ ,info=lowercase__ ,split=lowercase__ ,axis=lowercase__ )
else:
return _concatenate_iterable_datasets(lowercase__ ,info=lowercase__ ,split=lowercase__ ,axis=lowercase__ )
| 685 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
def __init__( self: List[Any] , a: Optional[Any] , a: Dict=13 , a: List[str]=7 , a: Optional[Any]=True , a: int=True , a: Any=True , a: Optional[int]=True , a: int=True , a: Dict=False , a: Union[str, Any]=False , a: Dict=False , a: List[str]=2 , a: Union[str, Any]=99 , a: List[Any]=0 , a: Optional[int]=32 , a: List[str]=5 , a: int=4 , a: List[Any]=0.1 , a: Optional[int]=0.1 , a: Optional[int]=5_12 , a: str=12 , a: Dict=2 , a: Any=0.02 , a: Optional[int]=3 , a: str=4 , a: Optional[int]="last" , a: Tuple=None , a: Any=None , ) ->int:
'''simple docstring'''
a_ = parent
a_ = batch_size
a_ = seq_length
a_ = is_training
a_ = use_input_lengths
a_ = use_token_type_ids
a_ = use_labels
a_ = gelu_activation
a_ = sinusoidal_embeddings
a_ = causal
a_ = asm
a_ = n_langs
a_ = vocab_size
a_ = n_special
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = type_vocab_size
a_ = type_sequence_label_size
a_ = initializer_range
a_ = num_labels
a_ = num_choices
a_ = summary_type
a_ = use_proj
a_ = scope
def _lowerCAmelCase ( self: Tuple) ->Dict:
'''simple docstring'''
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ = random_attention_mask([self.batch_size, self.seq_length])
a_ = None
if self.use_input_lengths:
a_ = (
ids_tensor([self.batch_size] , vocab_size=2) + self.seq_length - 2
) # small variation of seq_length
a_ = None
if self.use_token_type_ids:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs)
a_ = None
a_ = None
a_ = None
if self.use_labels:
a_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a_ = ids_tensor([self.batch_size] , 2).float()
a_ = ids_tensor([self.batch_size] , self.num_choices)
a_ = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _lowerCAmelCase ( self: List[Any]) ->Any:
'''simple docstring'''
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def _lowerCAmelCase ( self: Optional[int] , a: Tuple , a: List[Any] , a: List[Any] , a: Optional[int] , a: int , a: str , a: Any , a: str , a: List[Any] , ) ->Union[str, Any]:
'''simple docstring'''
a_ = FlaubertModel(config=a)
model.to(a)
model.eval()
a_ = model(a , lengths=a , langs=a)
a_ = model(a , langs=a)
a_ = model(a)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _lowerCAmelCase ( self: Optional[int] , a: Optional[Any] , a: Dict , a: Union[str, Any] , a: Dict , a: Optional[Any] , a: Any , a: Tuple , a: str , a: List[str] , ) ->Dict:
'''simple docstring'''
a_ = FlaubertWithLMHeadModel(a)
model.to(a)
model.eval()
a_ = model(a , token_type_ids=a , labels=a)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _lowerCAmelCase ( self: Optional[int] , a: Tuple , a: Optional[Any] , a: List[Any] , a: List[str] , a: List[str] , a: List[str] , a: Optional[Any] , a: str , a: Union[str, Any] , ) ->str:
'''simple docstring'''
a_ = FlaubertForQuestionAnsweringSimple(a)
model.to(a)
model.eval()
a_ = model(a)
a_ = model(a , start_positions=a , end_positions=a)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def _lowerCAmelCase ( self: Union[str, Any] , a: List[str] , a: Tuple , a: Optional[Any] , a: Any , a: Dict , a: Any , a: Optional[int] , a: Optional[Any] , a: Union[str, Any] , ) ->int:
'''simple docstring'''
a_ = FlaubertForQuestionAnswering(a)
model.to(a)
model.eval()
a_ = model(a)
a_ = model(
a , start_positions=a , end_positions=a , cls_index=a , is_impossible=a , p_mask=a , )
a_ = model(
a , start_positions=a , end_positions=a , cls_index=a , is_impossible=a , )
((a_) , ) = result_with_labels.to_tuple()
a_ = model(a , start_positions=a , end_positions=a)
((a_) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , ())
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,))
def _lowerCAmelCase ( self: Union[str, Any] , a: List[str] , a: Tuple , a: Union[str, Any] , a: Any , a: Tuple , a: Union[str, Any] , a: int , a: int , a: Dict , ) ->Union[str, Any]:
'''simple docstring'''
a_ = FlaubertForSequenceClassification(a)
model.to(a)
model.eval()
a_ = model(a)
a_ = model(a , labels=a)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def _lowerCAmelCase ( self: str , a: List[str] , a: Dict , a: Tuple , a: Optional[Any] , a: Any , a: Any , a: str , a: str , a: Optional[Any] , ) ->List[Any]:
'''simple docstring'''
a_ = self.num_labels
a_ = FlaubertForTokenClassification(a)
model.to(a)
model.eval()
a_ = model(a , attention_mask=a , labels=a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def _lowerCAmelCase ( self: Dict , a: Tuple , a: List[Any] , a: Dict , a: Optional[Any] , a: Optional[Any] , a: Optional[Any] , a: Union[str, Any] , a: List[str] , a: Tuple , ) ->Dict:
'''simple docstring'''
a_ = self.num_choices
a_ = FlaubertForMultipleChoice(config=a)
model.to(a)
model.eval()
a_ = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ = model(
a , attention_mask=a , token_type_ids=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def _lowerCAmelCase ( self: Any) ->List[Any]:
'''simple docstring'''
a_ = self.prepare_config_and_inputs()
(
(
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) ,
) = config_and_inputs
a_ = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"lengths": input_lengths,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( lowercase_ , lowercase_ , unittest.TestCase ):
_UpperCAmelCase =(
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
_UpperCAmelCase =(
{
'''feature-extraction''': FlaubertModel,
'''fill-mask''': FlaubertWithLMHeadModel,
'''question-answering''': FlaubertForQuestionAnsweringSimple,
'''text-classification''': FlaubertForSequenceClassification,
'''token-classification''': FlaubertForTokenClassification,
'''zero-shot''': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def _lowerCAmelCase ( self: Optional[Any] , a: List[Any] , a: Any , a: List[str] , a: Union[str, Any] , a: int) ->int:
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast")
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _lowerCAmelCase ( self: str , a: Optional[Any] , a: List[Any] , a: Tuple=False) ->List[Any]:
'''simple docstring'''
a_ = super()._prepare_for_class(a , a , return_labels=a)
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
a_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a)
a_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a)
return inputs_dict
def _lowerCAmelCase ( self: Dict) ->Union[str, Any]:
'''simple docstring'''
a_ = FlaubertModelTester(self)
a_ = ConfigTester(self , config_class=a , emb_dim=37)
def _lowerCAmelCase ( self: List[str]) ->Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self: List[str]) ->Optional[Any]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*a)
def _lowerCAmelCase ( self: int) ->Optional[int]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*a)
def _lowerCAmelCase ( self: Optional[int]) ->Optional[Any]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*a)
def _lowerCAmelCase ( self: Any) ->Optional[int]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*a)
def _lowerCAmelCase ( self: Optional[Any]) ->Tuple:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*a)
def _lowerCAmelCase ( self: Optional[Any]) ->Union[str, Any]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*a)
def _lowerCAmelCase ( self: List[Any]) ->Dict:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*a)
@slow
def _lowerCAmelCase ( self: Any) ->Any:
'''simple docstring'''
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = FlaubertModel.from_pretrained(a)
self.assertIsNotNone(a)
@slow
@require_torch_gpu
def _lowerCAmelCase ( self: int) ->Optional[int]:
'''simple docstring'''
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
a_ = True
a_ = model_class(config=a)
a_ = self._prepare_for_class(a , a)
a_ = torch.jit.trace(
a , (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu")))
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a , os.path.join(a , "traced_model.pt"))
a_ = torch.jit.load(os.path.join(a , "traced_model.pt") , map_location=a)
loaded(inputs_dict["input_ids"].to(a) , inputs_dict["attention_mask"].to(a))
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
def _lowerCAmelCase ( self: List[Any]) ->Optional[int]:
'''simple docstring'''
a_ = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
a_ = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]])
with torch.no_grad():
a_ = model(a)[0]
a_ = torch.Size((1, 11, 7_68))
self.assertEqual(output.shape , a)
a_ = torch.tensor(
[[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , a , atol=1e-4))
| 685 | 1 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
a_ = [
('bert.bert', 'visual_bert'),
('bert.cls', 'cls'),
('bert.classifier', 'cls'),
('token_type_embeddings_visual', 'visual_token_type_embeddings'),
('position_embeddings_visual', 'visual_position_embeddings'),
('projection', 'visual_projection'),
]
a_ = [
'nlvr2_coco_pre_trained.th',
'nlvr2_fine_tuned.th',
'nlvr2_pre_trained.th',
'vcr_coco_pre_train.th',
'vcr_fine_tune.th',
'vcr_pre_train.th',
'vqa_coco_pre_trained.th',
'vqa_fine_tuned.th',
'vqa_pre_trained.th',
]
def __UpperCAmelCase (lowercase__ ) -> List[str]:
'''simple docstring'''
a_ = torch.load(lowercase__ ,map_location="cpu" )
return sd
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__=rename_keys_prefix ) -> Any:
'''simple docstring'''
a_ = OrderedDict()
a_ = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
a_ = key
for name_pair in rename_keys_prefix:
a_ = new_key.replace(name_pair[0] ,name_pair[1] )
a_ = d[key]
if key == "bert.cls.predictions.decoder.weight":
            # Old BERT code didn't have `decoder.bias`; it was added separately
a_ = new_d["cls.predictions.bias"]
return new_d
@torch.no_grad()
def __UpperCAmelCase (lowercase__ ,lowercase__ ) -> Optional[int]:
'''simple docstring'''
assert (
checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS
), F"""The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."""
# Get Config
if "pre" in checkpoint_path:
a_ = "pretraining"
if "vcr" in checkpoint_path:
a_ = {"visual_embedding_dim": 512}
elif "vqa_advanced" in checkpoint_path:
a_ = {"visual_embedding_dim": 2048}
elif "vqa" in checkpoint_path:
a_ = {"visual_embedding_dim": 2048}
elif "nlvr" in checkpoint_path:
a_ = {"visual_embedding_dim": 1024}
else:
raise NotImplementedError(F"""No implementation found for `{checkpoint_path}`.""" )
else:
if "vcr" in checkpoint_path:
a_ = {"visual_embedding_dim": 512}
a_ = "multichoice"
elif "vqa_advanced" in checkpoint_path:
a_ = {"visual_embedding_dim": 2048}
a_ = "vqa_advanced"
elif "vqa" in checkpoint_path:
a_ = {"visual_embedding_dim": 2048, "num_labels": 3129}
a_ = "vqa"
elif "nlvr" in checkpoint_path:
a_ = {
"visual_embedding_dim": 1024,
"num_labels": 2,
}
a_ = "nlvr"
a_ = VisualBertConfig(**lowercase__ )
# Load State Dict
a_ = load_state_dict(lowercase__ )
a_ = get_new_dict(lowercase__ ,lowercase__ )
if model_type == "pretraining":
a_ = VisualBertForPreTraining(lowercase__ )
elif model_type == "vqa":
a_ = VisualBertForQuestionAnswering(lowercase__ )
elif model_type == "nlvr":
a_ = VisualBertForVisualReasoning(lowercase__ )
elif model_type == "multichoice":
a_ = VisualBertForMultipleChoice(lowercase__ )
model.load_state_dict(lowercase__ )
# Save Checkpoints
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
model.save_pretrained(lowercase__ )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.')
a_ = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 685 |
'''simple docstring'''
import math
def __UpperCAmelCase (lowercase__ ) -> bool:
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not prime
        return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 ,int(math.sqrt(lowercase__ ) + 1 ) ,6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __UpperCAmelCase (lowercase__ = 10001 ) -> int:
'''simple docstring'''
try:
a_ = int(lowercase__ )
except (TypeError, ValueError):
raise TypeError("Parameter nth must be int or castable to int." ) from None
if nth <= 0:
raise ValueError("Parameter nth must be greater than or equal to one." )
a_ = []
a_ = 2
while len(lowercase__ ) < nth:
if is_prime(lowercase__ ):
primes.append(lowercase__ )
num += 1
else:
num += 1
return primes[len(lowercase__ ) - 1]
if __name__ == "__main__":
print(F'{solution() = }')
| 685 | 1 |
'''simple docstring'''
def __UpperCAmelCase (lowercase__ ) -> str:
'''simple docstring'''
a_ = ""
for ch in key:
if ch == " " or ch not in key_no_dups and ch.isalpha():
key_no_dups += ch
return key_no_dups
def __UpperCAmelCase (lowercase__ ) -> dict[str, str]:
'''simple docstring'''
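    # Build a monoalphabetic substitution table: the deduplicated key fills the first slots, then the
    # remaining alphabet wraps around, skipping any letter the key already used.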
a_ = [chr(i + 65 ) for i in range(26 )]
# Remove duplicate characters from key
a_ = remove_duplicates(key.upper() )
a_ = len(lowercase__ )
# First fill cipher with key characters
a_ = {alphabet[i]: char for i, char in enumerate(lowercase__ )}
    # Then map the remaining characters of the alphabet,
    # wrapping back to the start of the alphabet
for i in range(len(lowercase__ ) ,26 ):
a_ = alphabet[i - offset]
# Ensure we are not mapping letters to letters previously mapped
while char in key:
offset -= 1
a_ = alphabet[i - offset]
a_ = char
return cipher_alphabet
def __UpperCAmelCase (lowercase__ ,lowercase__ ) -> str:
'''simple docstring'''
return "".join(cipher_map.get(lowercase__ ,lowercase__ ) for ch in message.upper() )
def __UpperCAmelCase (lowercase__ ,lowercase__ ) -> str:
'''simple docstring'''
a_ = {v: k for k, v in cipher_map.items()}
return "".join(rev_cipher_map.get(lowercase__ ,lowercase__ ) for ch in message.upper() )
def __UpperCAmelCase () -> None:
'''simple docstring'''
a_ = input("Enter message to encode or decode: " ).strip()
a_ = input("Enter keyword: " ).strip()
a_ = input("Encipher or decipher? E/D:" ).strip()[0].lower()
try:
a_ = {"e": encipher, "d": decipher}[option]
except KeyError:
raise KeyError("invalid input option" )
a_ = create_cipher_map(lowercase__ )
print(func(lowercase__ ,lowercase__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 685 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'uclanlp/visualbert-vqa': 'https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json',
'uclanlp/visualbert-vqa-pre': 'https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json',
'uclanlp/visualbert-vqa-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-vcr': 'https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json',
'uclanlp/visualbert-vcr-pre': 'https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json',
'uclanlp/visualbert-vcr-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-nlvr2': 'https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-pre': 'https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_UpperCAmelCase ='''visual_bert'''
def __init__( self: Union[str, Any] , a: List[Any]=3_05_22 , a: List[Any]=7_68 , a: Union[str, Any]=5_12 , a: List[str]=12 , a: Tuple=12 , a: Optional[Any]=30_72 , a: int="gelu" , a: Union[str, Any]=0.1 , a: int=0.1 , a: str=5_12 , a: Optional[int]=2 , a: List[str]=0.02 , a: Optional[int]=1e-12 , a: str=False , a: Any=True , a: Tuple=1 , a: Dict=0 , a: Any=2 , **a: Optional[Any] , ) ->str:
'''simple docstring'''
super().__init__(pad_token_id=a , bos_token_id=a , eos_token_id=a , **a)
a_ = vocab_size
a_ = max_position_embeddings
a_ = hidden_size
a_ = visual_embedding_dim
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = initializer_range
a_ = type_vocab_size
a_ = layer_norm_eps
a_ = bypass_transformer
a_ = special_visual_initialize
| 685 | 1 |
'''simple docstring'''
def __UpperCAmelCase (lowercase__ ) -> int:
'''simple docstring'''
if not isinstance(lowercase__ ,lowercase__ ):
raise TypeError("Input value must be an 'int' type" )
a_ = 0
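    # Count right-shifts until the number is exhausted; e.g. 32 (0b100000) -> 6, the 1-based index of the MSB.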
while number:
position += 1
number >>= 1
return position
if __name__ == "__main__":
import doctest
doctest.testmod()
| 685 |
'''simple docstring'''
from heapq import heappop, heappush
import numpy as np
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,) -> tuple[float | int, list[tuple[int, int]]]:
'''simple docstring'''
a_ , a_ = grid.shape
a_ = [-1, 1, 0, 0]
a_ = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
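    # Dijkstra with unit edge weights: a min-heap of (distance, cell) plus a predecessor grid to rebuild the path.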
a_ , a_ = [(0, source)], set()
a_ = np.full((rows, cols) ,np.inf )
a_ = 0
a_ = np.empty((rows, cols) ,dtype=lowercase__ )
a_ = None
while queue:
((a_) , (a_)) = heappop(lowercase__ )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
a_ = []
while (x, y) != source:
path.append((x, y) )
a_ , a_ = predecessors[x, y]
path.append(lowercase__ ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(lowercase__ ) ):
a_ , a_ = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
a_ = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(lowercase__ ,(dist + 1, (nx, ny)) )
a_ = dist + 1
a_ = (x, y)
return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
| 685 | 1 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
a_ = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
a_ = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def __UpperCAmelCase (lowercase__ ) -> int:
'''simple docstring'''
a_ = {}
with open(lowercase__ ,"r" ) as file:
for line_number, line in enumerate(lowercase__ ):
a_ = line.strip()
if line:
a_ = line.split()
a_ = line_number
a_ = words[0]
a_ = value
return result
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ) -> int:
'''simple docstring'''
for attribute in key.split("." ):
a_ = getattr(lowercase__ ,lowercase__ )
a_ = None
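    # Names listed in PARAM_MAPPING address nn.Parameter attributes rather
    # than module weights; they are handled below as weight_type == "param".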
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(lowercase__ ):
a_ = PARAM_MAPPING[full_name.split("." )[-1]]
a_ = "param"
if weight_type is not None and weight_type != "param":
a_ = getattr(lowercase__ ,lowercase__ ).shape
elif weight_type is not None and weight_type == "param":
a_ = hf_pointer
for attribute in hf_param_name.split("." ):
a_ = getattr(lowercase__ ,lowercase__ )
a_ = shape_pointer.shape
# let's reduce dimension
a_ = value[0]
else:
a_ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
a_ = value
elif weight_type == "weight_g":
a_ = value
elif weight_type == "weight_v":
a_ = value
elif weight_type == "bias":
a_ = value
elif weight_type == "param":
for attribute in hf_param_name.split("." ):
a_ = getattr(lowercase__ ,lowercase__ )
a_ = value
else:
a_ = value
logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ) -> Optional[int]:
'''simple docstring'''
a_ = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(lowercase__ ):
a_ = PARAM_MAPPING[full_name.split("." )[-1]]
a_ = "param"
if weight_type is not None and weight_type != "param":
a_ = ".".join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
a_ = ".".join([key, hf_param_name] )
else:
a_ = key
a_ = value if "lm_head" in full_key else value[0]
a_ = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__=None ,lowercase__=None ) -> List[Any]:
'''simple docstring'''
a_ = False
for key, mapped_key in MAPPING.items():
a_ = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
a_ = True
if "*" in mapped_key:
a_ = name.split(lowercase__ )[0].split("." )[-2]
a_ = mapped_key.replace("*" ,lowercase__ )
if "weight_g" in name:
a_ = "weight_g"
elif "weight_v" in name:
a_ = "weight_v"
elif "bias" in name:
a_ = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
a_ = "weight"
else:
a_ = None
if hf_dict is not None:
rename_dict(lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
else:
set_recursively(lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
return is_used
return is_used
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ) -> str:
'''simple docstring'''
a_ = []
a_ = fairseq_model.state_dict()
a_ = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
a_ = False
if "conv_layers" in name:
load_conv_layer(
lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,hf_model.config.feat_extract_norm == "group" ,)
a_ = True
else:
a_ = load_wavaveca_layer(lowercase__ ,lowercase__ ,lowercase__ )
if not is_used:
unused_weights.append(lowercase__ )
logger.warning(F"""Unused weights: {unused_weights}""" )
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ) -> Dict:
'''simple docstring'''
a_ = full_name.split("conv_layers." )[-1]
a_ = name.split("." )
a_ = int(items[0] )
a_ = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
a_ = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
a_ = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
a_ = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
a_ = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(lowercase__ )
@torch.no_grad()
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__=None ,lowercase__=None ,lowercase__=True ,lowercase__=False ) -> Optional[Any]:
'''simple docstring'''
if config_path is not None:
a_ = WavaVecaConfig.from_pretrained(lowercase__ )
else:
a_ = WavaVecaConfig()
if is_seq_class:
a_ = read_txt_into_dict(lowercase__ )
a_ = idalabel
a_ = WavaVecaForSequenceClassification(lowercase__ )
a_ = WavaVecaFeatureExtractor(
feature_size=1 ,sampling_rate=16000 ,padding_value=0 ,do_normalize=lowercase__ ,return_attention_mask=lowercase__ ,)
feature_extractor.save_pretrained(lowercase__ )
elif is_finetuned:
if dict_path:
a_ = Dictionary.load(lowercase__ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
a_ = target_dict.pad_index
a_ = target_dict.bos_index
a_ = target_dict.eos_index
a_ = len(target_dict.symbols )
a_ = os.path.join(lowercase__ ,"vocab.json" )
if not os.path.isdir(lowercase__ ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(lowercase__ ) )
return
os.makedirs(lowercase__ ,exist_ok=lowercase__ )
a_ = target_dict.indices
# fairseq has the <pad> and <s> switched
a_ = 0
a_ = 1
with open(lowercase__ ,"w" ,encoding="utf-8" ) as vocab_handle:
json.dump(lowercase__ ,lowercase__ )
a_ = WavaVecaCTCTokenizer(
lowercase__ ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token="|" ,do_lower_case=lowercase__ ,)
a_ = True if config.feat_extract_norm == "layer" else False
a_ = WavaVecaFeatureExtractor(
feature_size=1 ,sampling_rate=16000 ,padding_value=0 ,do_normalize=lowercase__ ,return_attention_mask=lowercase__ ,)
a_ = WavaVecaProcessor(feature_extractor=lowercase__ ,tokenizer=lowercase__ )
processor.save_pretrained(lowercase__ )
a_ = WavaVecaForCTC(lowercase__ )
else:
a_ = WavaVecaForPreTraining(lowercase__ )
if is_finetuned or is_seq_class:
a_ , a_ , a_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] ,arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
a_ = argparse.Namespace(task="audio_pretraining" )
a_ = fairseq.tasks.setup_task(lowercase__ )
a_ , a_ , a_ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ,task=lowercase__ )
a_ = model[0].eval()
recursively_load_weights(lowercase__ ,lowercase__ ,not is_finetuned )
hf_wavavec.save_pretrained(lowercase__ )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
a_ = parser.parse_args()
a_ = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 685 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ) -> Any:
'''simple docstring'''
with open(lowercase__ ) as metadata_file:
a_ = json.load(lowercase__ )
a_ = LukeConfig(use_entity_aware_attention=lowercase__ ,**metadata["model_config"] )
# Load in the weights from the checkpoint_path
a_ = torch.load(lowercase__ ,map_location="cpu" )["module"]
# Load the entity vocab file
a_ = load_original_entity_vocab(lowercase__ )
# add an entry for [MASK2]
a_ = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
a_ = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
a_ = AddedToken("<ent>" ,lstrip=lowercase__ ,rstrip=lowercase__ )
a_ = AddedToken("<ent2>" ,lstrip=lowercase__ ,rstrip=lowercase__ )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(lowercase__ )
with open(os.path.join(lowercase__ ,"tokenizer_config.json" ) ,"r" ) as f:
a_ = json.load(lowercase__ )
a_ = "MLukeTokenizer"
with open(os.path.join(lowercase__ ,"tokenizer_config.json" ) ,"w" ) as f:
json.dump(lowercase__ ,lowercase__ )
with open(os.path.join(lowercase__ ,MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) ,"w" ) as f:
json.dump(lowercase__ ,lowercase__ )
a_ = MLukeTokenizer.from_pretrained(lowercase__ )
# Initialize the embeddings of the special tokens
a_ = tokenizer.convert_tokens_to_ids(["@"] )[0]
a_ = tokenizer.convert_tokens_to_ids(["#"] )[0]
a_ = state_dict["embeddings.word_embeddings.weight"]
a_ = word_emb[ent_init_index].unsqueeze(0 )
a_ = word_emb[enta_init_index].unsqueeze(0 )
a_ = torch.cat([word_emb, ent_emb, enta_emb] )
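    # The two appended rows initialize <ent> and <ent2> from the "@" and "#" word embeddings.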
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
a_ = state_dict[bias_name]
a_ = decoder_bias[ent_init_index].unsqueeze(0 )
a_ = decoder_bias[enta_init_index].unsqueeze(0 )
a_ = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
a_ = F"""encoder.layer.{layer_index}.attention.self."""
a_ = state_dict[prefix + matrix_name]
a_ = state_dict[prefix + matrix_name]
a_ = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
a_ = state_dict["entity_embeddings.entity_embeddings.weight"]
a_ = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
a_ = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
a_ = state_dict["entity_predictions.bias"]
a_ = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
a_ = torch.cat([entity_prediction_bias, entity_mask_bias] )
a_ = LukeForMaskedLM(config=lowercase__ ).eval()
state_dict.pop("entity_predictions.decoder.weight" )
state_dict.pop("lm_head.decoder.weight" )
state_dict.pop("lm_head.decoder.bias" )
a_ = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )):
a_ = state_dict[key]
else:
a_ = state_dict[key]
a_ , a_ = model.load_state_dict(lowercase__ ,strict=lowercase__ )
if set(lowercase__ ) != {"luke.embeddings.position_ids"}:
raise ValueError(F"""Unexpected unexpected_keys: {unexpected_keys}""" )
if set(lowercase__ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F"""Unexpected missing_keys: {missing_keys}""" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
a_ = MLukeTokenizer.from_pretrained(lowercase__ ,task="entity_classification" )
a_ = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
a_ = (0, 9)
a_ = tokenizer(lowercase__ ,entity_spans=[span] ,return_tensors="pt" )
a_ = model(**lowercase__ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
a_ = torch.Size((1, 33, 768) )
a_ = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] ,lowercase__ ,atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
a_ = torch.Size((1, 1, 768) )
a_ = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
F""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] ,lowercase__ ,atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
a_ = MLukeTokenizer.from_pretrained(lowercase__ )
a_ = "Tokyo is the capital of <mask>."
a_ = (24, 30)
a_ = tokenizer(lowercase__ ,entity_spans=[span] ,return_tensors="pt" )
a_ = model(**lowercase__ )
a_ = encoding["input_ids"][0].tolist()
a_ = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) )
a_ = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(lowercase__ )
a_ = outputs.entity_logits[0][0].argmax().item()
a_ = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(lowercase__ ) )
model.save_pretrained(lowercase__ )
def __UpperCAmelCase (lowercase__ ) -> Any:
'''simple docstring'''
a_ = ["[MASK]", "[PAD]", "[UNK]"]
a_ = [json.loads(lowercase__ ) for line in open(lowercase__ )]
a_ = {}
for entry in data:
a_ = entry["id"]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
a_ = entity_id
break
a_ = F"""{language}:{entity_name}"""
a_ = entity_id
return new_mapping
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
a_ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 685 | 1 |
'''simple docstring'''
from collections.abc import Callable
class SCREAMING_SNAKE_CASE__ :
def __init__( self: Optional[int] , a: Callable | None = None) ->None:
'''simple docstring'''
a_ = []
# Stores indexes of each item for supporting updates and deletion.
a_ = {}
# Stores current size of heap.
a_ = 0
# Stores function used to evaluate the score of an item on which basis ordering
# will be done.
        a_ = key or (lambda x: x)
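        # Defaults to the identity function when no key is provided.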
def _lowerCAmelCase ( self: List[Any] , a: int) ->int | None:
'''simple docstring'''
return int((i - 1) / 2) if i > 0 else None
def _lowerCAmelCase ( self: Optional[int] , a: int) ->int | None:
'''simple docstring'''
a_ = int(2 * i + 1)
return left if 0 < left < self.size else None
def _lowerCAmelCase ( self: List[str] , a: int) ->int | None:
'''simple docstring'''
a_ = int(2 * i + 2)
return right if 0 < right < self.size else None
def _lowerCAmelCase ( self: List[Any] , a: int , a: int) ->None:
'''simple docstring'''
a_ , a_ = (
self.pos_map[self.arr[j][0]],
self.pos_map[self.arr[i][0]],
)
# Then swap the items in the list.
a_ , a_ = self.arr[j], self.arr[i]
def _lowerCAmelCase ( self: List[str] , a: int , a: int) ->bool:
'''simple docstring'''
return self.arr[i][1] < self.arr[j][1]
def _lowerCAmelCase ( self: Optional[Any] , a: int) ->int:
'''simple docstring'''
a_ = self._left(a)
a_ = self._right(a)
a_ = i
if left is not None and not self._cmp(a , a):
a_ = left
if right is not None and not self._cmp(a , a):
a_ = right
return valid_parent
def _lowerCAmelCase ( self: str , a: int) ->None:
'''simple docstring'''
a_ = self._parent(a)
while parent is not None and not self._cmp(a , a):
self._swap(a , a)
a_ , a_ = parent, self._parent(a)
def _lowerCAmelCase ( self: List[Any] , a: int) ->None:
'''simple docstring'''
a_ = self._get_valid_parent(a)
while valid_parent != index:
self._swap(a , a)
a_ , a_ = valid_parent, self._get_valid_parent(a)
def _lowerCAmelCase ( self: Optional[int] , a: int , a: int) ->None:
'''simple docstring'''
if item not in self.pos_map:
return
a_ = self.pos_map[item]
a_ = [item, self.key(a)]
# Make sure heap is right in both up and down direction.
# Ideally only one of them will make any change.
self._heapify_up(a)
self._heapify_down(a)
def _lowerCAmelCase ( self: int , a: int) ->None:
'''simple docstring'''
if item not in self.pos_map:
return
a_ = self.pos_map[item]
del self.pos_map[item]
a_ = self.arr[self.size - 1]
a_ = index
self.size -= 1
# Make sure heap is right in both up and down direction. Ideally only one
# of them will make any change- so no performance loss in calling both.
if self.size > index:
self._heapify_up(a)
self._heapify_down(a)
def _lowerCAmelCase ( self: Tuple , a: int , a: int) ->None:
'''simple docstring'''
a_ = len(self.arr)
if arr_len == self.size:
self.arr.append([item, self.key(a)])
else:
a_ = [item, self.key(a)]
a_ = self.size
self.size += 1
self._heapify_up(self.size - 1)
def _lowerCAmelCase ( self: Dict) ->tuple | None:
'''simple docstring'''
return self.arr[0] if self.size else None
def _lowerCAmelCase ( self: List[str]) ->tuple | None:
'''simple docstring'''
a_ = self.get_top()
if top_item_tuple:
self.delete_item(top_item_tuple[0])
return top_item_tuple
def __UpperCAmelCase () -> None:
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
| 685 |
'''simple docstring'''
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( lowercase_ , unittest.TestCase ):
_UpperCAmelCase =LxmertTokenizer
_UpperCAmelCase =LxmertTokenizerFast
_UpperCAmelCase =True
_UpperCAmelCase =True
def _lowerCAmelCase ( self: Dict) ->int:
'''simple docstring'''
super().setUp()
a_ = [
"[UNK]",
"[CLS]",
"[SEP]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
a_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
def _lowerCAmelCase ( self: Optional[Any] , a: Dict) ->Optional[Any]:
'''simple docstring'''
a_ = "UNwant\u00E9d,running"
a_ = "unwanted, running"
return input_text, output_text
def _lowerCAmelCase ( self: List[Any]) ->Optional[int]:
'''simple docstring'''
a_ = self.tokenizer_class(self.vocab_file)
a_ = tokenizer.tokenize("UNwant\u00E9d,running")
self.assertListEqual(a , ["un", "##want", "##ed", ",", "runn", "##ing"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(a) , [7, 4, 5, 10, 8, 9])
def _lowerCAmelCase ( self: List[Any]) ->Any:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
a_ = self.get_tokenizer()
a_ = self.get_rust_tokenizer()
a_ = "I was born in 92000, and this is falsé."
a_ = tokenizer.tokenize(a)
a_ = rust_tokenizer.tokenize(a)
self.assertListEqual(a , a)
a_ = tokenizer.encode(a , add_special_tokens=a)
a_ = rust_tokenizer.encode(a , add_special_tokens=a)
self.assertListEqual(a , a)
a_ = self.get_rust_tokenizer()
a_ = tokenizer.encode(a)
a_ = rust_tokenizer.encode(a)
self.assertListEqual(a , a)
| 685 | 1 |
'''simple docstring'''
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
a_ = [
'word_embeddings_layernorm.weight',
'word_embeddings_layernorm.bias',
'input_layernorm.weight',
'input_layernorm.bias',
'post_attention_layernorm.weight',
'post_attention_layernorm.bias',
'self_attention.dense.bias',
'mlp.dense_4h_to_h.bias',
'ln_f.weight',
'ln_f.bias',
]
a_ = [
'mlp.dense_4h_to_h.weight',
'self_attention.dense.weight',
]
def __UpperCAmelCase (lowercase__ ,lowercase__ ) -> Optional[Any]:
'''simple docstring'''
a_ = {
"word_embeddings.weight": "word_embeddings.weight",
"word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
"word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
"weight": "ln_f.weight",
"bias": "ln_f.bias",
}
if key in layer_rename_map:
return layer_rename_map[key]
# Handle transformer blocks
a_ = int(re.match(r".*layer_(\d*).*" ,lowercase__ )[1] )
layer_number -= 3
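    # Megatron numbers the first transformer block layer_3, so shift the
    # index to start the transformers h.* numbering at 0.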
return F"""h.{layer_number}.""" + key
def __UpperCAmelCase (lowercase__ ) -> List[str]:
'''simple docstring'''
if dtype == torch.bool:
return 1 / 8
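    # Read the bit width from the end of the dtype name,
    # e.g. torch.float16 -> 16 bits -> 2 bytes.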
a_ = re.search(r"[^\d](\d+)$" ,str(lowercase__ ) )
if bit_search is None:
raise ValueError(F"""`dtype` is not a valid dtype: {dtype}.""" )
a_ = int(bit_search.groups()[0] )
return bit_size // 8
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ) -> Optional[Any]:
'''simple docstring'''
if bloom_config_file == "":
a_ = BloomConfig()
else:
a_ = BloomConfig.from_json_file(lowercase__ )
if shard_model:
a_ = os.listdir(lowercase__ )
a_ = sorted(filter(lambda lowercase__ : s.startswith("layer" ) and "model_00" in s ,lowercase__ ) )
a_ = {"weight_map": {}, "metadata": {}}
a_ = 0
a_ = None
a_ = BloomConfig()
for j, file in enumerate(lowercase__ ):
print("Processing file: {}".format(lowercase__ ) )
a_ = None
for i in range(lowercase__ ):
# load all TP files
a_ = file.replace("model_00" ,F"""model_0{i}""" )
a_ = torch.load(os.path.join(lowercase__ ,lowercase__ ) ,map_location="cpu" )
# Rename keys in the transformers names
a_ = list(temp.keys() )
for key in keys:
a_ = temp.pop(lowercase__ )
if tensors is None:
a_ = temp
else:
for key in tensors.keys():
if any(key.endswith(lowercase__ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
                        # Some weights are RowParallelLinear in Megatron-DeepSpeed, others are ColumnParallel
                        a_ = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                        # We concatenate these weights across TP ranks
a_ = torch.cat([tensors[key], temp[key]] ,dim=lowercase__ )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(lowercase__ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
a_ = tensors[key] / pretraining_tp
torch.save(
lowercase__ ,os.path.join(
lowercase__ ,"pytorch_model_{}-of-{}.bin".format(str(j + 1 ).zfill(5 ) ,str(len(lowercase__ ) ).zfill(5 ) ) ,) ,)
for key in tensors.keys():
a_ = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
a_ = "pytorch_model_{}-of-{}.bin".format(
str(j + 1 ).zfill(5 ) ,str(len(lowercase__ ) ).zfill(5 ) )
a_ = BloomConfig()
a_ = pytorch_dump_folder_path + "/" + CONFIG_NAME
a_ = total_size
with open(lowercase__ ,"w" ,encoding="utf-8" ) as f:
f.write(config.to_json_string() )
with open(os.path.join(lowercase__ ,WEIGHTS_NAME + ".index.json" ) ,"w" ,encoding="utf-8" ) as f:
a_ = json.dumps(lowercase__ ,indent=2 ,sort_keys=lowercase__ ) + "\n"
f.write(lowercase__ )
else:
a_ = BloomModel(lowercase__ )
a_ = os.listdir(lowercase__ )
a_ = sorted(filter(lambda lowercase__ : s.startswith("layer" ) and "model_00" in s ,lowercase__ ) )
a_ = None
for i, file in enumerate(lowercase__ ):
a_ = None
for i in range(lowercase__ ):
# load all TP files
a_ = file.replace("model_00" ,F"""model_0{i}""" )
a_ = torch.load(os.path.join(lowercase__ ,lowercase__ ) ,map_location="cpu" )
# Rename keys in the transformers names
a_ = list(temp.keys() )
for key in keys:
a_ = temp.pop(lowercase__ )
if tensors is None:
a_ = temp
else:
for key in tensors.keys():
                    # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
if any(key.endswith(lowercase__ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
                        # Some weights are RowParallelLinear in Megatron-DeepSpeed, others are ColumnParallel
                        a_ = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                        # We concatenate these weights across TP ranks
a_ = torch.cat([tensors[key], temp[key]] ,dim=lowercase__ )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(lowercase__ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
a_ = tensors[key] / pretraining_tp
a_ = model.load_state_dict(lowercase__ ,strict=lowercase__ )
assert not other_keys.unexpected_keys, F"""The keys {other_keys.unexpected_keys} are unexpected"""
if missing_keys is None:
a_ = set(other_keys.missing_keys )
else:
a_ = missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, F"""The keys {missing_keys} are missing"""
# Save pytorch-model
os.makedirs(lowercase__ ,exist_ok=lowercase__ )
a_ = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
a_ = pytorch_dump_folder_path + "/" + CONFIG_NAME
print(F"""Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}""" )
if config.torch_dtype is not None:
a_ = model.to(config.torch_dtype )
torch.save(model.state_dict() ,lowercase__ )
print(F"""Save configuration file to {pytorch_config_dump_path}""" )
with open(lowercase__ ,"w" ,encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--bloom_checkpoint_path',
default=None,
type=str,
required=True,
help='Path to the Megatron-LM checkpoint path.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--bloom_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--shard_model',
action='store_true',
help='An optional setting to shard the output model \nThis enables sharding the converted checkpoint',
)
parser.add_argument(
'--pretraining_tp',
default=4,
type=int,
help='Pretraining TP rank that has been used when training the model in Megatron-LM \n',
)
a_ = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
| 685 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
a_ = {
'configuration_cpmant': ['CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CpmAntConfig'],
'tokenization_cpmant': ['CpmAntTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST',
'CpmAntForCausalLM',
'CpmAntModel',
'CpmAntPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 685 | 1 |
'''simple docstring'''
a_ = range(2, 20 + 1)
a_ = [10**k for k in range(ks[-1] + 1)]
a_ = {}
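# Memo of precomputed "jumps" keyed by partial digit sums, so long stretches
# of the sequence can be skipped instead of computed term by term.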
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ) -> Optional[Any]:
'''simple docstring'''
a_ = sum(a_i[j] for j in range(lowercase__ ,len(lowercase__ ) ) )
a_ = sum(a_i[j] * base[j] for j in range(min(len(lowercase__ ) ,lowercase__ ) ) )
a_ , a_ = 0, 0
a_ = n - i
a_ = memo.get(lowercase__ )
if sub_memo is not None:
a_ = sub_memo.get(lowercase__ )
if jumps is not None and len(lowercase__ ) > 0:
# find and make the largest jump without going over
a_ = -1
for _k in range(len(lowercase__ ) - 1 ,-1 ,-1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
a_ = _k
break
if max_jump >= 0:
a_ , a_ , a_ = jumps[max_jump]
# since the difference between jumps is cached, add c
a_ = diff + c
for j in range(min(lowercase__ ,len(lowercase__ ) ) ):
a_ , a_ = divmod(lowercase__ ,10 )
if new_c > 0:
add(lowercase__ ,lowercase__ ,lowercase__ )
else:
a_ = []
else:
a_ = {c: []}
a_ = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
a_ , a_ = next_term(lowercase__ ,k - 1 ,i + dn ,lowercase__ )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
a_ , a_ = compute(lowercase__ ,lowercase__ ,i + dn ,lowercase__ )
diff += _diff
dn += terms_jumped
a_ = sub_memo[c]
# keep jumps sorted by # of terms skipped
a_ = 0
while j < len(lowercase__ ):
if jumps[j][1] > dn:
break
j += 1
    # cache the jump for this value of digitsum(b) and c
sub_memo[c].insert(lowercase__ ,(diff, dn, k) )
return (diff, dn)
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ) -> str:
'''simple docstring'''
if i >= n:
return 0, i
if k > len(lowercase__ ):
a_i.extend([0 for _ in range(k - len(lowercase__ ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
a_ = i
a_ , a_ , a_ = 0, 0, 0
for j in range(len(lowercase__ ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
a_ = ds_c + ds_b
diff += addend
a_ = 0
for j in range(lowercase__ ):
a_ = a_i[j] + addend
a_ , a_ = divmod(lowercase__ ,10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(lowercase__ ,lowercase__ ,lowercase__ )
return diff, i - start_i
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ) -> int:
'''simple docstring'''
for j in range(lowercase__ ,len(lowercase__ ) ):
a_ = digits[j] + addend
if s >= 10:
a_ , a_ = divmod(lowercase__ ,10 )
a_ = addend // 10 + quotient
else:
a_ = s
a_ = addend // 10
if addend == 0:
break
while addend > 0:
a_ , a_ = divmod(lowercase__ ,10 )
digits.append(lowercase__ )
def __UpperCAmelCase (lowercase__ = 10**15 ) -> int:
'''simple docstring'''
a_ = [1]
a_ = 1
a_ = 0
while True:
a_ , a_ = next_term(lowercase__ ,20 ,i + dn ,lowercase__ )
dn += terms_jumped
if dn == n - i:
break
a_ = 0
for j in range(len(lowercase__ ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(F'{solution() = }')
| 685 |
'''simple docstring'''
import re
def __UpperCAmelCase (lowercase__ ) -> bool:
'''simple docstring'''
a_ = re.compile(
r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$" )
return bool(re.search(lowercase__ ,lowercase__ ) )
if __name__ == "__main__":
a_ = '0094702343221'
print(is_sri_lankan_phone_number(phone))
| 685 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'RWKV/rwkv-4-169m-pile': 'https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json',
'RWKV/rwkv-4-430m-pile': 'https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json',
'RWKV/rwkv-4-1b5-pile': 'https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json',
'RWKV/rwkv-4-3b-pile': 'https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json',
'RWKV/rwkv-4-7b-pile': 'https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json',
'RWKV/rwkv-4-14b-pile': 'https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json',
'RWKV/rwkv-raven-1b5': 'https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json',
'RWKV/rwkv-raven-3b': 'https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json',
'RWKV/rwkv-raven-7b': 'https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json',
'RWKV/rwkv-raven-14b': 'https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json',
}
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_UpperCAmelCase ='''rwkv'''
_UpperCAmelCase ={'''max_position_embeddings''': '''context_length'''}
def __init__( self: List[str] , a: str=5_02_77 , a: Optional[Any]=10_24 , a: Any=40_96 , a: Union[str, Any]=32 , a: Optional[int]=None , a: Optional[Any]=None , a: int=1e-5 , a: Optional[int]=0 , a: Optional[int]=0 , a: Any=6 , a: Any=False , a: Dict=True , **a: List[Any] , ) ->Union[str, Any]:
'''simple docstring'''
a_ = vocab_size
a_ = context_length
a_ = hidden_size
a_ = num_hidden_layers
a_ = attention_hidden_size if attention_hidden_size is not None else hidden_size
a_ = intermediate_size if intermediate_size is not None else 4 * hidden_size
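        # When unset, the attention width defaults to the hidden size and the
        # feed-forward width to four times the hidden size.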
a_ = layer_norm_epsilon
a_ = rescale_every
a_ = use_cache
a_ = bos_token_id
a_ = eos_token_id
super().__init__(
tie_word_embeddings=a , bos_token_id=a , eos_token_id=a , **a)
| 685 |
'''simple docstring'''
import argparse
import os
import re
a_ = 'src/transformers/models/auto'
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
a_ = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings
a_ = re.compile(r'\s*\(\s*"(\S[^"]+)"')
def __UpperCAmelCase (lowercase__ ,lowercase__ = False ) -> List[Any]:
'''simple docstring'''
with open(lowercase__ ,"r" ,encoding="utf-8" ) as f:
a_ = f.read()
a_ = content.split("\n" )
a_ = []
a_ = 0
while line_idx < len(lowercase__ ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
a_ = len(re.search(r"^(\s*)\S" ,lines[line_idx] ).groups()[0] ) + 8
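            # Entries of the mapping are indented eight spaces past the assignment line.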
# Start of a new mapping!
while not lines[line_idx].startswith(" " * indent + "(" ):
new_lines.append(lines[line_idx] )
line_idx += 1
a_ = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
a_ = line_idx
while not lines[line_idx].startswith(" " * indent + ")" ):
line_idx += 1
blocks.append("\n".join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
a_ = sorted(lowercase__ ,key=lambda lowercase__ : _re_identifier.search(lowercase__ ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(lowercase__ ,"w" ,encoding="utf-8" ) as f:
f.write("\n".join(lowercase__ ) )
elif "\n".join(lowercase__ ) != content:
return True
def __UpperCAmelCase (lowercase__ = False ) -> Optional[int]:
'''simple docstring'''
a_ = [os.path.join(lowercase__ ,lowercase__ ) for f in os.listdir(lowercase__ ) if f.endswith(".py" )]
a_ = [sort_auto_mapping(lowercase__ ,overwrite=lowercase__ ) for fname in fnames]
if not overwrite and any(lowercase__ ):
a_ = [f for f, d in zip(lowercase__ ,lowercase__ ) if d]
raise ValueError(
F"""The following files have auto mappings that need sorting: {', '.join(lowercase__ )}. Run `make style` to fix"""
" this." )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
a_ = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
| 685 | 1 |
'''simple docstring'''
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 685 |
'''simple docstring'''
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class SCREAMING_SNAKE_CASE__ ( lowercase_ , lowercase_ ):
_UpperCAmelCase ='''pixel_values'''
_UpperCAmelCase =False
_UpperCAmelCase =TimmBackboneConfig
def __init__( self: Union[str, Any] , a: Union[str, Any] , **a: Tuple) ->Optional[Any]:
'''simple docstring'''
requires_backends(self , "timm")
super().__init__(a)
a_ = config
if config.backbone is None:
raise ValueError("backbone is not set in the config. Please set it to a timm model name.")
if config.backbone not in timm.list_models():
raise ValueError(f"""backbone {config.backbone} is not supported by timm.""")
if hasattr(a , "out_features") and config.out_features is not None:
raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")
a_ = getattr(a , "use_pretrained_backbone" , a)
if pretrained is None:
raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")
# We just take the final layer by default. This matches the default for the transformers models.
a_ = config.out_indices if getattr(a , "out_indices" , a) is not None else (-1,)
a_ = timm.create_model(
config.backbone , pretrained=a , features_only=config.features_only , in_chans=config.num_channels , out_indices=a , **a , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
a_ = self._backbone.return_layers
a_ = {layer["module"]: str(a) for i, layer in enumerate(self._backbone.feature_info.info)}
super()._init_backbone(a)
@classmethod
def _lowerCAmelCase ( cls: Tuple , a: Optional[Any] , *a: Optional[Any] , **a: str) ->List[Any]:
'''simple docstring'''
requires_backends(cls , ["vision", "timm"])
from ...models.timm_backbone import TimmBackboneConfig
a_ = kwargs.pop("config" , TimmBackboneConfig())
a_ = kwargs.pop("use_timm_backbone" , a)
if not use_timm:
raise ValueError("use_timm_backbone must be True for timm backbones")
a_ = kwargs.pop("num_channels" , config.num_channels)
a_ = kwargs.pop("features_only" , config.features_only)
a_ = kwargs.pop("use_pretrained_backbone" , config.use_pretrained_backbone)
a_ = kwargs.pop("out_indices" , config.out_indices)
a_ = TimmBackboneConfig(
backbone=a , num_channels=a , features_only=a , use_pretrained_backbone=a , out_indices=a , )
return super()._from_config(a , **a)
def _lowerCAmelCase ( self: Optional[Any] , a: Optional[int]) ->str:
'''simple docstring'''
pass
def _lowerCAmelCase ( self: Tuple , a: List[Any] , a: Any=None , a: Dict=None , a: Optional[int]=None , **a: int) ->Union[BackboneOutput, Tuple[Tensor, ...]]:
'''simple docstring'''
a_ = return_dict if return_dict is not None else self.config.use_return_dict
a_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
a_ = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError("Cannot output attentions for timm backbones at the moment")
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
a_ = self._all_layers
a_ = self._backbone(a , **a)
a_ = self._return_layers
a_ = tuple(hidden_states[i] for i in self.out_indices)
else:
a_ = self._backbone(a , **a)
a_ = None
a_ = tuple(a)
a_ = tuple(a) if hidden_states is not None else None
if not return_dict:
a_ = (feature_maps,)
if output_hidden_states:
a_ = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=a , hidden_states=a , attentions=a)
| 685 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class SCREAMING_SNAKE_CASE__ ( lowercase_ , unittest.TestCase ):
_UpperCAmelCase =CTRLTokenizer
_UpperCAmelCase =False
_UpperCAmelCase =False
def _lowerCAmelCase ( self: Dict) ->Optional[Any]:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
a_ = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
a_ = dict(zip(a , range(len(a))))
a_ = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
a_ = {"unk_token": "<unk>"}
a_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
a_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as fp:
fp.write(json.dumps(a) + "\n")
with open(self.merges_file , "w" , encoding="utf-8") as fp:
fp.write("\n".join(a))
def _lowerCAmelCase ( self: Any , **a: List[Any]) ->Union[str, Any]:
'''simple docstring'''
kwargs.update(self.special_tokens_map)
return CTRLTokenizer.from_pretrained(self.tmpdirname , **a)
def _lowerCAmelCase ( self: List[str] , a: str) ->List[Any]:
'''simple docstring'''
a_ = "adapt react readapt apt"
a_ = "adapt react readapt apt"
return input_text, output_text
def _lowerCAmelCase ( self: List[Any]) ->Any:
'''simple docstring'''
a_ = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map)
a_ = "adapt react readapt apt"
a_ = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
a_ = tokenizer.tokenize(a)
self.assertListEqual(a , a)
a_ = tokens + [tokenizer.unk_token]
a_ = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a) , a)
| 685 |
'''simple docstring'''
class OverFlowError(Exception):
    pass
class UnderFlowError(Exception):
    pass
class SCREAMING_SNAKE_CASE__ :
def __init__( self: Optional[Any]) ->List[str]:
'''simple docstring'''
a_ = [
[],
[],
[],
]
def _lowerCAmelCase ( self: Dict , a: int , a: int) ->None:
'''simple docstring'''
try:
if len(self.queues[priority]) >= 1_00:
                raise OverFlowError("Maximum queue size is 100")
self.queues[priority].append(a)
except IndexError:
raise ValueError("Valid priorities are 0, 1, and 2")
def _lowerCAmelCase ( self: Union[str, Any]) ->int:
'''simple docstring'''
for queue in self.queues:
if queue:
return queue.pop(0)
raise UnderFlowError("All queues are empty")
def __str__( self: Dict) ->str:
'''simple docstring'''
return "\n".join(f"""Priority {i}: {q}""" for i, q in enumerate(self.queues))
class SCREAMING_SNAKE_CASE__ :
def __init__( self: Any) ->List[str]:
'''simple docstring'''
a_ = []
def _lowerCAmelCase ( self: int , a: int) ->None:
'''simple docstring'''
if len(self.queue) == 1_00:
raise OverFlowError("Maximum queue size is 100")
self.queue.append(a)
def _lowerCAmelCase ( self: List[str]) ->int:
'''simple docstring'''
if not self.queue:
raise UnderFlowError("The queue is empty")
else:
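            # Linear scan: find and remove the smallest element, an O(n) dequeue.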
a_ = min(self.queue)
self.queue.remove(a)
return data
def __str__( self: Optional[int]) ->str:
'''simple docstring'''
return str(self.queue)
def __UpperCAmelCase () -> Union[str, Any]:
'''simple docstring'''
a_ = FixedPriorityQueue()
fpq.enqueue(0 ,10 )
fpq.enqueue(1 ,70 )
fpq.enqueue(0 ,100 )
fpq.enqueue(2 ,1 )
fpq.enqueue(2 ,5 )
fpq.enqueue(1 ,7 )
fpq.enqueue(2 ,4 )
fpq.enqueue(1 ,64 )
fpq.enqueue(0 ,128 )
print(lowercase__ )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(lowercase__ )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def __UpperCAmelCase () -> List[Any]:
'''simple docstring'''
a_ = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(100 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(128 )
print(lowercase__ )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(lowercase__ )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 685 | 1 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE__ :
def __init__( self: int , a: List[str] , a: str=13 , a: Union[str, Any]=7 , a: Optional[Any]=True , a: Optional[Any]=True , a: str=True , a: Optional[int]=True , a: Optional[int]=99 , a: str=32 , a: str=5 , a: Optional[int]=4 , a: Union[str, Any]=37 , a: List[str]="gelu" , a: int=0.1 , a: Dict=0.1 , a: Dict=1_28 , a: Optional[Any]=32 , a: str=16 , a: int=2 , a: List[str]=0.02 , a: Optional[Any]=3 , a: str=4 , a: Optional[int]=None , ) ->int:
'''simple docstring'''
a_ = parent
a_ = batch_size
a_ = seq_length
a_ = is_training
a_ = use_input_mask
a_ = use_token_type_ids
a_ = use_labels
a_ = vocab_size
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = type_vocab_size
a_ = type_sequence_label_size
a_ = initializer_range
a_ = num_labels
a_ = num_choices
a_ = scope
def _lowerCAmelCase ( self: Optional[int]) ->List[Any]:
'''simple docstring'''
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ = None
if self.use_input_mask:
a_ = random_attention_mask([self.batch_size, self.seq_length])
a_ = None
if self.use_token_type_ids:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
a_ = None
a_ = None
a_ = None
if self.use_labels:
a_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a_ = ids_tensor([self.batch_size] , self.num_choices)
a_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCAmelCase ( self: str) ->Union[str, Any]:
'''simple docstring'''
return NezhaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , )
def _lowerCAmelCase ( self: int) ->Any:
'''simple docstring'''
        a_ , a_ , a_ , a_ , a_ , a_ , a_ = self.prepare_config_and_inputs()
a_ = True
a_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
a_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def _lowerCAmelCase ( self: Optional[Any] , a: List[str] , a: Optional[Any] , a: Optional[Any] , a: Tuple , a: Tuple , a: Union[str, Any] , a: str) ->Any:
'''simple docstring'''
a_ = NezhaModel(config=a)
model.to(a)
model.eval()
a_ = model(a , attention_mask=a , token_type_ids=a)
a_ = model(a , token_type_ids=a)
a_ = model(a)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def _lowerCAmelCase ( self: Optional[int] , a: Any , a: int , a: Tuple , a: Dict , a: List[Any] , a: Optional[int] , a: Optional[int] , a: Optional[int] , a: List[Any] , ) ->int:
'''simple docstring'''
a_ = True
a_ = NezhaModel(a)
model.to(a)
model.eval()
a_ = model(
a , attention_mask=a , token_type_ids=a , encoder_hidden_states=a , encoder_attention_mask=a , )
a_ = model(
a , attention_mask=a , token_type_ids=a , encoder_hidden_states=a , )
a_ = model(a , attention_mask=a , token_type_ids=a)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def _lowerCAmelCase ( self: Optional[Any] , a: Optional[Any] , a: Dict , a: List[Any] , a: Union[str, Any] , a: List[Any] , a: List[Any] , a: List[str]) ->List[str]:
'''simple docstring'''
a_ = NezhaForMaskedLM(config=a)
model.to(a)
model.eval()
a_ = model(a , attention_mask=a , token_type_ids=a , labels=a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _lowerCAmelCase ( self: int , a: Dict , a: Dict , a: Dict , a: Dict , a: Optional[int] , a: Any , a: Union[str, Any]) ->str:
'''simple docstring'''
a_ = NezhaForNextSentencePrediction(config=a)
model.to(a)
model.eval()
a_ = model(
a , attention_mask=a , token_type_ids=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2))
def _lowerCAmelCase ( self: int , a: Optional[int] , a: int , a: Any , a: int , a: str , a: Union[str, Any] , a: str) ->int:
'''simple docstring'''
a_ = NezhaForPreTraining(config=a)
model.to(a)
model.eval()
a_ = model(
a , attention_mask=a , token_type_ids=a , labels=a , next_sentence_label=a , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2))
def _lowerCAmelCase ( self: Optional[Any] , a: Optional[Any] , a: Optional[Any] , a: str , a: Dict , a: int , a: int , a: Dict) ->Optional[Any]:
'''simple docstring'''
a_ = NezhaForQuestionAnswering(config=a)
model.to(a)
model.eval()
a_ = model(
a , attention_mask=a , token_type_ids=a , start_positions=a , end_positions=a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def _lowerCAmelCase ( self: List[Any] , a: Optional[Any] , a: Union[str, Any] , a: Union[str, Any] , a: Dict , a: Optional[Any] , a: Dict , a: Dict) ->Dict:
'''simple docstring'''
a_ = self.num_labels
a_ = NezhaForSequenceClassification(a)
model.to(a)
model.eval()
a_ = model(a , attention_mask=a , token_type_ids=a , labels=a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def _lowerCAmelCase ( self: Any , a: List[str] , a: Optional[Any] , a: Any , a: Optional[Any] , a: Optional[Any] , a: List[Any] , a: Optional[int]) ->str:
'''simple docstring'''
a_ = self.num_labels
a_ = NezhaForTokenClassification(config=a)
model.to(a)
model.eval()
a_ = model(a , attention_mask=a , token_type_ids=a , labels=a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def _lowerCAmelCase ( self: List[Any] , a: List[str] , a: int , a: List[Any] , a: Optional[Any] , a: Optional[Any] , a: int , a: Dict) ->Tuple:
'''simple docstring'''
a_ = self.num_choices
a_ = NezhaForMultipleChoice(config=a)
model.to(a)
model.eval()
a_ = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ = model(
a , attention_mask=a , token_type_ids=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def _lowerCAmelCase ( self: Any) ->Optional[int]:
'''simple docstring'''
a_ = self.prepare_config_and_inputs()
        a_ , a_ , a_ , a_ , a_ , a_ , a_ = config_and_inputs
a_ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ):
_UpperCAmelCase =(
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
_UpperCAmelCase =(
{
'''feature-extraction''': NezhaModel,
'''fill-mask''': NezhaForMaskedLM,
'''question-answering''': NezhaForQuestionAnswering,
'''text-classification''': NezhaForSequenceClassification,
'''token-classification''': NezhaForTokenClassification,
'''zero-shot''': NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCAmelCase =True
def _lowerCAmelCase ( self: str , a: Tuple , a: Dict , a: Optional[int]=False) ->Optional[int]:
'''simple docstring'''
a_ = super()._prepare_for_class(a , a , return_labels=a)
if return_labels:
if model_class in get_values(a):
a_ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=a)
a_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a)
return inputs_dict
def _lowerCAmelCase ( self: List[Any]) ->List[Any]:
'''simple docstring'''
a_ = NezhaModelTester(self)
a_ = ConfigTester(self , config_class=a , hidden_size=37)
def _lowerCAmelCase ( self: Tuple) ->List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self: Union[str, Any]) ->str:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a)
def _lowerCAmelCase ( self: Union[str, Any]) ->Union[str, Any]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*a)
def _lowerCAmelCase ( self: List[str]) ->Dict:
'''simple docstring'''
        a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ = self.model_tester.prepare_config_and_inputs_for_decoder()
a_ = None
self.model_tester.create_and_check_model_as_decoder(
a , a , a , a , a , a , a , a , a , )
def _lowerCAmelCase ( self: Optional[int]) ->str:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*a)
def _lowerCAmelCase ( self: int) ->Union[str, Any]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*a)
def _lowerCAmelCase ( self: Optional[int]) ->str:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*a)
def _lowerCAmelCase ( self: Any) ->int:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*a)
def _lowerCAmelCase ( self: str) ->Union[str, Any]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a)
def _lowerCAmelCase ( self: List[str]) ->List[Any]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*a)
def _lowerCAmelCase ( self: Dict) ->int:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a)
@slow
def _lowerCAmelCase ( self: Tuple) ->Tuple:
'''simple docstring'''
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = NezhaModel.from_pretrained(a)
self.assertIsNotNone(a)
@slow
@require_torch_gpu
def _lowerCAmelCase ( self: List[Any]) ->Any:
'''simple docstring'''
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# NezhaForMultipleChoice behaves incorrectly in JIT environments.
if model_class == NezhaForMultipleChoice:
return
a_ = True
a_ = model_class(config=a)
a_ = self._prepare_for_class(a , a)
a_ = torch.jit.trace(
a , (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu")))
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a , os.path.join(a , "bert.pt"))
a_ = torch.jit.load(os.path.join(a , "bert.pt") , map_location=a)
loaded(inputs_dict["input_ids"].to(a) , inputs_dict["attention_mask"].to(a))
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
def _lowerCAmelCase ( self: List[str]) ->Optional[Any]:
'''simple docstring'''
a_ = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
a_ = torch.tensor([[0, 1, 2, 3, 4, 5]])
a_ = torch.tensor([[0, 1, 1, 1, 1, 1]])
with torch.no_grad():
a_ = model(a , attention_mask=a)[0]
a_ = torch.Size((1, 6, 7_68))
self.assertEqual(output.shape , a)
a_ = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]])
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a , atol=1e-4))
@slow
def _lowerCAmelCase ( self: Optional[int]) ->Tuple:
'''simple docstring'''
a_ = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
a_ = torch.tensor([[0, 1, 2, 3, 4, 5]])
a_ = torch.tensor([[1, 1, 1, 1, 1, 1]])
with torch.no_grad():
a_ = model(a , attention_mask=a)[0]
a_ = torch.Size((1, 6, 2_11_28))
self.assertEqual(output.shape , a)
a_ = torch.tensor(
[[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]])
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a , atol=1e-4))
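# Standalone inference sketch mirroring the integration test above (hedged: this
# assumes the public `transformers` API and the "sijunhe/nezha-cn-base" checkpoint
# already used in this file):
#
#   import torch
#   from transformers import NezhaModel
#
#   model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
#   hidden = model(torch.tensor([[0, 1, 2, 3, 4, 5]])).last_hidden_state
#   assert hidden.shape == (1, 6, 768)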
| 685 |
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def __UpperCAmelCase () -> Optional[Any]:
'''simple docstring'''
a_ = {
"repo_name": ["test_repo1", "test_repo2", "test_repo3"],
"path": ["test_1.py", "test_2.py", "unit_test.py"],
"content": ["a " * 20, "a " * 30, "b " * 7],
}
a_ = Dataset.from_dict(lowercase__ )
return dataset
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
def _lowerCAmelCase ( self: Union[str, Any]) ->Optional[int]:
'''simple docstring'''
a_ = get_dataset()
a_ = make_duplicate_clusters(a , 0.85)
self.assertEqual(len(duplicate_clusters[0]) , 2)
def _lowerCAmelCase ( self: Any) ->Dict:
'''simple docstring'''
a_ = get_dataset()
a_ , a_ = deduplicate_dataset(a)
self.assertEqual(len(a) , 2)
print(a)
self.assertEqual(duplicate_clusters[0][0]["copies"] , 2)
self.assertEqual(duplicate_clusters[0][0]["is_extreme"] , a)
| 685 | 1 |
'''simple docstring'''
a_ = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ) -> Optional[int]:
'''simple docstring'''
a_ = [False] * len(lowercase__ )
a_ = [s]
a_ = True
while queue:
a_ = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(lowercase__ )
a_ = True
a_ = u
return visited[t]
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ) -> int:
'''simple docstring'''
a_ = [-1] * (len(lowercase__ ))
a_ = 0
a_ = []
    a_ = [i[:] for i in graph]  # copy the original capacities, used later to recover the cut edges
while bfs(lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ):
a_ = float("Inf" )
a_ = sink
while s != source:
            # Find the minimum residual capacity along the selected path
a_ = min(lowercase__ ,graph[parent[s]][s] )
a_ = parent[s]
max_flow += path_flow
a_ = sink
while v != source:
a_ = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
a_ = parent[v]
for i in range(len(lowercase__ ) ):
for j in range(len(graph[0] ) ):
if graph[i][j] == 0 and temp[i][j] > 0:
res.append((i, j) )
return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
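# Worked check (hedged): the function above is BFS-based Ford-Fulkerson (Edmonds-Karp)
# followed by a scan for saturated edges of the original graph. On the 6-node test
# graph the maximum flow is 23 (augmenting paths s-1-3-5 carrying 12, s-2-4-5
# carrying 4, and s-2-4-3-5 carrying 7), so by the max-flow/min-cut theorem the
# returned edges form a cut of total capacity 23.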
| 685 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
a_ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
def __init__( self: List[Any] , *a: str , **a: Tuple) ->None:
'''simple docstring'''
warnings.warn(
"The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use DonutImageProcessor instead." , a , )
super().__init__(*a , **a)
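# Migration sketch (hedged: assumes the standard `transformers` API that the warning
# above points to):
#
#   from transformers import DonutImageProcessor
#
#   processor = DonutImageProcessor.from_pretrained("naver-clova-ix/donut-base")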
| 685 | 1 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class SCREAMING_SNAKE_CASE__ :
def __init__( self: Union[str, Any] , a: Union[str, Any] , a: str=13 , a: List[str]=7 , a: Tuple=True , a: Optional[Any]=True , a: str=False , a: Tuple=True , a: Optional[Any]=99 , a: Any=32 , a: Optional[int]=5 , a: Optional[Any]=4 , a: Optional[Any]=37 , a: Dict="gelu" , a: List[Any]=0.1 , a: Union[str, Any]=0.1 , a: List[Any]=5_12 , a: List[str]=16 , a: Tuple=2 , a: List[Any]=0.02 , a: Dict=3 , a: Optional[int]=4 , a: Dict=None , ) ->Any:
'''simple docstring'''
a_ = parent
a_ = batch_size
a_ = seq_length
a_ = is_training
a_ = use_input_mask
a_ = use_token_type_ids
a_ = use_labels
a_ = vocab_size
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = type_vocab_size
a_ = type_sequence_label_size
a_ = initializer_range
a_ = num_labels
a_ = num_choices
a_ = scope
def _lowerCAmelCase ( self: Dict) ->Any:
'''simple docstring'''
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ = None
if self.use_input_mask:
a_ = random_attention_mask([self.batch_size, self.seq_length])
a_ = None
if self.use_token_type_ids:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
a_ = None
a_ = None
a_ = None
if self.use_labels:
a_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a_ = ids_tensor([self.batch_size] , self.num_choices)
a_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCAmelCase ( self: str) ->str:
'''simple docstring'''
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , )
def _lowerCAmelCase ( self: Union[str, Any] , a: Tuple , a: Any , a: Union[str, Any] , a: Dict , a: str , a: str , a: List[str]) ->Dict:
'''simple docstring'''
a_ = LlamaModel(config=a)
model.to(a)
model.eval()
a_ = model(a , attention_mask=a)
a_ = model(a)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _lowerCAmelCase ( self: Optional[int] , a: Tuple , a: List[Any] , a: Tuple , a: Dict , a: List[str] , a: List[Any] , a: Optional[Any] , a: List[Any] , a: Any , ) ->Any:
'''simple docstring'''
a_ = True
a_ = LlamaModel(a)
model.to(a)
model.eval()
a_ = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , )
a_ = model(
a , attention_mask=a , encoder_hidden_states=a , )
a_ = model(a , attention_mask=a)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _lowerCAmelCase ( self: int , a: str , a: str , a: str , a: List[str] , a: int , a: List[str] , a: Optional[int] , a: Any , a: Dict , ) ->int:
'''simple docstring'''
a_ = LlamaForCausalLM(config=a)
model.to(a)
model.eval()
a_ = model(a , attention_mask=a , labels=a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _lowerCAmelCase ( self: List[Any] , a: Any , a: List[str] , a: Optional[int] , a: str , a: Any , a: Any , a: int , a: Optional[Any] , a: Optional[Any] , ) ->Union[str, Any]:
'''simple docstring'''
a_ = True
a_ = True
a_ = LlamaForCausalLM(config=a)
model.to(a)
model.eval()
# first forward pass
a_ = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , use_cache=a , )
a_ = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
a_ = ids_tensor((self.batch_size, 3) , config.vocab_size)
a_ = ids_tensor((self.batch_size, 3) , vocab_size=2)
        # append to next input_ids and next attention mask
a_ = torch.cat([input_ids, next_tokens] , dim=-1)
a_ = torch.cat([input_mask, next_mask] , dim=-1)
a_ = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , output_hidden_states=a , )["hidden_states"][0]
a_ = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , past_key_values=a , output_hidden_states=a , )["hidden_states"][0]
# select random slice
a_ = ids_tensor((1,) , output_from_past.shape[-1]).item()
a_ = output_from_no_past[:, -3:, random_slice_idx].detach()
a_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a , a , atol=1e-3))
def _lowerCAmelCase ( self: List[Any]) ->Dict:
'''simple docstring'''
a_ = self.prepare_config_and_inputs()
        a_ , a_ , a_ , a_ , a_ , a_ , a_ = config_and_inputs
a_ = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ):
_UpperCAmelCase =(LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
_UpperCAmelCase =(LlamaForCausalLM,) if is_torch_available() else ()
_UpperCAmelCase =(
{
'''feature-extraction''': LlamaModel,
'''text-classification''': LlamaForSequenceClassification,
'''text-generation''': LlamaForCausalLM,
'''zero-shot''': LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCAmelCase =False
_UpperCAmelCase =False
def _lowerCAmelCase ( self: Dict) ->Optional[int]:
'''simple docstring'''
a_ = LlamaModelTester(self)
a_ = ConfigTester(self , config_class=a , hidden_size=37)
def _lowerCAmelCase ( self: List[Any]) ->Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self: Optional[Any]) ->Dict:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a)
def _lowerCAmelCase ( self: str) ->Optional[int]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
a_ = type
self.model_tester.create_and_check_model(*a)
def _lowerCAmelCase ( self: Optional[int]) ->Optional[int]:
'''simple docstring'''
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
a_ = 3
a_ = input_dict["input_ids"]
a_ = input_ids.ne(1).to(a)
a_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
a_ = LlamaForSequenceClassification(a)
model.to(a)
model.eval()
a_ = model(a , attention_mask=a , labels=a)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def _lowerCAmelCase ( self: Tuple) ->int:
'''simple docstring'''
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
a_ = 3
a_ = "single_label_classification"
a_ = input_dict["input_ids"]
a_ = input_ids.ne(1).to(a)
a_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
a_ = LlamaForSequenceClassification(a)
model.to(a)
model.eval()
a_ = model(a , attention_mask=a , labels=a)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def _lowerCAmelCase ( self: Optional[int]) ->Optional[int]:
'''simple docstring'''
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
a_ = 3
a_ = "multi_label_classification"
a_ = input_dict["input_ids"]
a_ = input_ids.ne(1).to(a)
a_ = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float)
a_ = LlamaForSequenceClassification(a)
model.to(a)
model.eval()
a_ = model(a , attention_mask=a , labels=a)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip("LLaMA buffers include complex numbers, which breaks this test")
def _lowerCAmelCase ( self: Any) ->List[str]:
'''simple docstring'''
pass
@parameterized.expand([("linear",), ("dynamic",)])
def _lowerCAmelCase ( self: str , a: str) ->Tuple:
'''simple docstring'''
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
a_ = ids_tensor([1, 10] , config.vocab_size)
a_ = ids_tensor([1, int(config.max_position_embeddings * 1.5)] , config.vocab_size)
set_seed(42) # Fixed seed at init time so the two models get the same random weights
a_ = LlamaModel(a)
original_model.to(a)
original_model.eval()
a_ = original_model(a).last_hidden_state
a_ = original_model(a).last_hidden_state
set_seed(42) # Fixed seed at init time so the two models get the same random weights
a_ = {"type": scaling_type, "factor": 10.0}
a_ = LlamaModel(a)
scaled_model.to(a)
scaled_model.eval()
a_ = scaled_model(a).last_hidden_state
a_ = scaled_model(a).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(a , a , atol=1e-5))
else:
self.assertFalse(torch.allclose(a , a , atol=1e-5))
# The output should be different for long inputs
self.assertFalse(torch.allclose(a , a , atol=1e-5))
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!")
@slow
def _lowerCAmelCase ( self: List[Any]) ->List[Any]:
'''simple docstring'''
a_ = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
a_ = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf" , device_map="auto")
a_ = model(torch.tensor([input_ids]))
# Expected mean on dim = -1
a_ = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]])
torch.testing.assert_close(out.mean(-1) , a , atol=1e-2 , rtol=1e-2)
# slicing logits[0, 0, 0:30]
# fmt: off
a_ = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,])
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , a , atol=1e-5 , rtol=1e-5)
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!")
@slow
def _lowerCAmelCase ( self: int) ->Optional[int]:
'''simple docstring'''
a_ = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
a_ = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf" , device_map="auto")
a_ = model(torch.tensor(a))
# Expected mean on dim = -1
a_ = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]])
torch.testing.assert_close(out.mean(-1) , a , atol=1e-2 , rtol=1e-2)
# slicing logits[0, 0, 0:30]
# fmt: off
a_ = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273])
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , a , atol=1e-5 , rtol=1e-5)
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!")
@slow
def _lowerCAmelCase ( self: int) ->List[str]:
'''simple docstring'''
a_ = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
a_ = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf" , device_map="auto")
a_ = model(torch.tensor(a))
# Expected mean on dim = -1
a_ = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]])
torch.testing.assert_close(out.mean(-1) , a , atol=1e-2 , rtol=1e-2)
# slicing logits[0, 0, 0:30]
# fmt: off
a_ = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513])
# fmt: on
torch.testing.assert_close(out.mean(-1) , a , atol=1e-2 , rtol=1e-2)
@unittest.skip(
"Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test")
@slow
def _lowerCAmelCase ( self: Dict) ->int:
'''simple docstring'''
a_ = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
a_ = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf" , device_map="auto")
a_ = model(torch.tensor(a))
a_ = torch.tensor(
[[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.floataa)
torch.testing.assert_close(out.mean(-1) , a , atol=1e-2 , rtol=1e-2)
# fmt: off
a_ = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312])
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , a , atol=1e-5 , rtol=1e-5)
@unittest.skip("Model is curently gated")
@slow
def _lowerCAmelCase ( self: Optional[int]) ->Optional[Any]:
'''simple docstring'''
a_ = "Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"
a_ = "Simply put, the theory of relativity states that "
a_ = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf")
a_ = tokenizer.encode(a , return_tensors="pt")
a_ = LlamaForCausalLM.from_pretrained(
"meta-llama/Llama-2-13b-chat-hf" , device_map="sequential" , use_safetensors=a)
# greedy generation outputs
a_ = model.generate(a , max_new_tokens=64 , top_p=a , temperature=1 , do_sample=a)
a_ = tokenizer.decode(generated_ids[0] , skip_special_tokens=a)
self.assertEqual(a , a)
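# Standalone sketch of the RoPE-scaling path exercised above (hedged: assumes
# `transformers`' LlamaConfig accepts a `rope_scaling` dict, as the parameterized
# test does):
#
#   from transformers import LlamaConfig, LlamaModel
#
#   config = LlamaConfig(rope_scaling={"type": "dynamic", "factor": 10.0})
#   model = LlamaModel(config)  # RoPE embeddings now stretch for long inputs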
| 685 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
a_ = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def _lowerCAmelCase ( self: Any , a: Path , a: Union[str, None] = None , a: Union[List[str], None] = None , a: Union[str, List[str], None] = None , a: bool = True , ) ->Optional[Any]:
'''simple docstring'''
a_ = [file for file in os.listdir(a) if os.path.isfile(os.path.join(a , a))]
if identifier is not None:
a_ = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(a , a):
for n_ in n_identifier:
a_ = [file for file in files if n_ not in file]
else:
a_ = [file for file in files if n_identifier not in file]
a_ = ignore_files or []
ignore_files.append("__init__.py")
a_ = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print("Testing" , a)
if only_modules:
a_ = file.split(".")[0]
try:
a_ = getattr(a , a)
a_ = doctest.DocTestSuite(a)
a_ = unittest.TextTestRunner().run(a)
self.assertIs(len(result.failures) , 0)
except AttributeError:
logger.info(f"""{module_identifier} is not a module.""")
else:
a_ = doctest.testfile(str(".." / directory / file) , optionflags=doctest.ELLIPSIS)
self.assertIs(result.failed , 0)
def _lowerCAmelCase ( self: Dict) ->Tuple:
'''simple docstring'''
a_ = Path("src/transformers")
a_ = "modeling"
a_ = [
"modeling_ctrl.py",
"modeling_tf_ctrl.py",
]
self.analyze_directory(a , identifier=a , ignore_files=a)
def _lowerCAmelCase ( self: int) ->Dict:
'''simple docstring'''
a_ = Path("src/transformers")
a_ = "tokenization"
self.analyze_directory(a , identifier=a)
def _lowerCAmelCase ( self: List[Any]) ->Optional[int]:
'''simple docstring'''
a_ = Path("src/transformers")
a_ = "configuration"
self.analyze_directory(a , identifier=a)
def _lowerCAmelCase ( self: Union[str, Any]) ->Any:
'''simple docstring'''
a_ = Path("src/transformers")
a_ = ["configuration", "modeling", "tokenization"]
self.analyze_directory(a , n_identifier=a)
def _lowerCAmelCase ( self: Optional[int]) ->Tuple:
'''simple docstring'''
a_ = Path("docs/source")
a_ = ["favicon.ico"]
self.analyze_directory(a , ignore_files=a , only_modules=a)
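# The directory walk above bottoms out in two standard-library calls; a minimal
# hedged illustration (the .rst path is hypothetical):
#
#   import doctest
#
#   result = doctest.testfile("../docs/source/quicktour.rst", optionflags=doctest.ELLIPSIS)
#   assert result.failed == 0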
| 685 | 1 |
'''simple docstring'''
import copy
import random
from transformers import CLIPTokenizer
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
def __init__( self: List[str] , *a: str , **a: Dict) ->List[str]:
'''simple docstring'''
super().__init__(*a , **a)
a_ = {}
def _lowerCAmelCase ( self: Union[str, Any] , a: Tuple , *a: List[Any] , **a: Optional[Any]) ->str:
'''simple docstring'''
a_ = super().add_tokens(a , *a , **a)
if num_added_tokens == 0:
raise ValueError(
f"""The tokenizer already contains the token {placeholder_token}. Please pass a different"""
" `placeholder_token` that is not already in the tokenizer.")
def _lowerCAmelCase ( self: Tuple , a: Union[str, Any] , *a: Optional[int] , a: Optional[int]=1 , **a: Optional[Any]) ->Optional[Any]:
'''simple docstring'''
a_ = []
if num_vec_per_token == 1:
self.try_adding_tokens(a , *a , **a)
output.append(a)
else:
a_ = []
for i in range(a):
a_ = placeholder_token + f"""_{i}"""
self.try_adding_tokens(a , *a , **a)
output.append(a)
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
f"""The tokenizer already has placeholder token {token} that can get confused with"""
f""" {placeholder_token}keep placeholder tokens independent""")
a_ = output
def _lowerCAmelCase ( self: str , a: List[str] , a: Optional[Any]=False , a: Optional[int]=1.0) ->str:
'''simple docstring'''
if isinstance(a , a):
a_ = []
for i in range(len(a)):
output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=a))
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
a_ = self.token_map[placeholder_token]
a_ = tokens[: 1 + int(len(a) * prop_tokens_to_load)]
if vector_shuffle:
a_ = copy.copy(a)
random.shuffle(a)
a_ = text.replace(a , " ".join(a))
return text
def __call__( self: Optional[Any] , a: Optional[int] , *a: Any , a: Optional[int]=False , a: Optional[Any]=1.0 , **a: List[Any]) ->str:
'''simple docstring'''
return super().__call__(
self.replace_placeholder_tokens_in_text(
a , vector_shuffle=a , prop_tokens_to_load=a) , *a , **a , )
def _lowerCAmelCase ( self: Dict , a: str , *a: Any , a: Union[str, Any]=False , a: List[str]=1.0 , **a: List[Any]) ->Optional[Any]:
'''simple docstring'''
return super().encode(
self.replace_placeholder_tokens_in_text(
a , vector_shuffle=a , prop_tokens_to_load=a) , *a , **a , )
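# Usage sketch (hedged; the class name MultiTokenCLIPTokenizer and the "<cat-toy>"
# placeholder are illustrative, not taken from this file): the wrapper above lets one
# placeholder expand into several learned tokens "<cat-toy>_0 <cat-toy>_1 ..." before
# normal CLIP encoding runs:
#
#   tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
#   tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)
#   ids = tokenizer.encode("a photo of <cat-toy>")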
| 685 |
'''simple docstring'''
def __UpperCAmelCase (lowercase__ = 100 ) -> int:
'''simple docstring'''
a_ = n * (n + 1) * (2 * n + 1) / 6
a_ = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(F'{solution() = }')
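# Worked check (hedged example): for n = 10 the square of the sum is 55**2 = 3025,
# the sum of the squares is 385, and the difference is 2640, i.e. solution(10) == 2640.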
| 685 | 1 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def __UpperCAmelCase (lowercase__ ) -> Optional[Any]:
'''simple docstring'''
if (
(cp >= 0x4E00 and cp <= 0x9FFF)
or (cp >= 0x3400 and cp <= 0x4DBF) #
or (cp >= 0x20000 and cp <= 0x2A6DF) #
or (cp >= 0x2A700 and cp <= 0x2B73F) #
or (cp >= 0x2B740 and cp <= 0x2B81F) #
or (cp >= 0x2B820 and cp <= 0x2CEAF) #
or (cp >= 0xF900 and cp <= 0xFAFF)
or (cp >= 0x2F800 and cp <= 0x2FA1F) #
): #
return True
return False
def __UpperCAmelCase (lowercase__ ) -> int:
'''simple docstring'''
for char in word:
a_ = ord(lowercase__ )
if not _is_chinese_char(lowercase__ ):
return 0
return 1
def __UpperCAmelCase (lowercase__ ) -> Optional[int]:
'''simple docstring'''
a_ = set()
for token in tokens:
a_ = len(lowercase__ ) > 1 and is_chinese(lowercase__ )
if chinese_word:
word_set.add(lowercase__ )
a_ = list(lowercase__ )
return word_list
def __UpperCAmelCase (lowercase__ ,lowercase__ ) -> List[str]:
'''simple docstring'''
if not chinese_word_set:
return bert_tokens
a_ = max([len(lowercase__ ) for w in chinese_word_set] )
a_ = bert_tokens
a_ , a_ = 0, len(lowercase__ )
while start < end:
a_ = True
if is_chinese(bert_word[start] ):
a_ = min(end - start ,lowercase__ )
for i in range(lowercase__ ,1 ,-1 ):
a_ = "".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 ,start + i ):
a_ = "##" + bert_word[j]
a_ = start + i
a_ = False
break
if single_word:
start += 1
return bert_word
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ) -> Optional[int]:
'''simple docstring'''
a_ = []
for i in range(0 ,len(lowercase__ ) ,100 ):
a_ = ltp_tokenizer.seg(lines[i : i + 100] )[0]
a_ = [get_chinese_word(lowercase__ ) for r in res]
ltp_res.extend(lowercase__ )
assert len(lowercase__ ) == len(lowercase__ )
a_ = []
for i in range(0 ,len(lowercase__ ) ,100 ):
a_ = bert_tokenizer(lines[i : i + 100] ,add_special_tokens=lowercase__ ,truncation=lowercase__ ,max_length=512 )
bert_res.extend(res["input_ids"] )
assert len(lowercase__ ) == len(lowercase__ )
a_ = []
for input_ids, chinese_word in zip(lowercase__ ,lowercase__ ):
a_ = []
for id in input_ids:
a_ = bert_tokenizer._convert_id_to_token(lowercase__ )
input_tokens.append(lowercase__ )
a_ = add_sub_symbol(lowercase__ ,lowercase__ )
a_ = []
        # We only save the positions of Chinese subwords that start with ##, which means they are part of a whole word.
for i, token in enumerate(lowercase__ ):
if token[:2] == "##":
a_ = token[2:]
                # save this Chinese token's position
if len(lowercase__ ) == 1 and _is_chinese_char(ord(lowercase__ ) ):
ref_id.append(lowercase__ )
ref_ids.append(lowercase__ )
assert len(lowercase__ ) == len(lowercase__ )
return ref_ids
def __UpperCAmelCase (lowercase__ ) -> Any:
'''simple docstring'''
with open(args.file_name ,"r" ,encoding="utf-8" ) as f:
a_ = f.readlines()
a_ = [line.strip() for line in data if len(lowercase__ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
    a_ = LTP(args.ltp )  # faster on a GPU device
a_ = BertTokenizer.from_pretrained(args.bert )
a_ = prepare_ref(lowercase__ ,lowercase__ ,lowercase__ )
with open(args.save_path ,"w" ,encoding="utf-8" ) as f:
a_ = [json.dumps(lowercase__ ) + "\n" for ref in ref_ids]
f.writelines(lowercase__ )
if __name__ == "__main__":
a_ = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp', type=str, default='./resources/ltp', help='resources for LTP tokenizer, usually a path'
)
parser.add_argument('--bert', type=str, default='./resources/robert', help='resources for Bert tokenizer')
parser.add_argument('--save_path', type=str, default='./resources/ref.txt', help='path to save res')
a_ = parser.parse_args()
main(args)
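# What `add_sub_symbol` contributes (hedged worked example): every BERT subword after
# the first one inside an LTP-segmented Chinese word is rewritten with a "##" prefix,
# and the positions of those "##" characters become the reference ids used for
# whole-word masking:
#
#   add_sub_symbol(["北", "京", "人"], {"北京"})  ->  ["北", "##京", "人"]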
| 685 |
'''simple docstring'''
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_UpperCAmelCase =(PNDMScheduler,)
_UpperCAmelCase =(('''num_inference_steps''', 50),)
def _lowerCAmelCase ( self: int , **a: Optional[int]) ->Any:
'''simple docstring'''
a_ = {
"num_train_timesteps": 10_00,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**a)
return config
def _lowerCAmelCase ( self: Any , a: Tuple=0 , **a: Any) ->Any:
'''simple docstring'''
a_ = dict(self.forward_default_kwargs)
a_ = kwargs.pop("num_inference_steps" , a)
a_ = self.dummy_sample
a_ = 0.1 * sample
a_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
a_ = self.get_scheduler_config(**a)
a_ = scheduler_class(**a)
scheduler.set_timesteps(a)
# copy over dummy past residuals
a_ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a)
a_ = scheduler_class.from_pretrained(a)
new_scheduler.set_timesteps(a)
# copy over dummy past residuals
a_ = dummy_past_residuals[:]
a_ = scheduler.step_prk(a , a , a , **a).prev_sample
a_ = new_scheduler.step_prk(a , a , a , **a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
a_ = scheduler.step_plms(a , a , a , **a).prev_sample
a_ = new_scheduler.step_plms(a , a , a , **a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def _lowerCAmelCase ( self: str) ->Any:
'''simple docstring'''
pass
def _lowerCAmelCase ( self: Union[str, Any] , a: str=0 , **a: Union[str, Any]) ->Tuple:
'''simple docstring'''
a_ = dict(self.forward_default_kwargs)
a_ = kwargs.pop("num_inference_steps" , a)
a_ = self.dummy_sample
a_ = 0.1 * sample
a_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
a_ = self.get_scheduler_config()
a_ = scheduler_class(**a)
scheduler.set_timesteps(a)
# copy over dummy past residuals (must be after setting timesteps)
a_ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a)
a_ = scheduler_class.from_pretrained(a)
            # set timesteps on the freshly loaded scheduler
            new_scheduler.set_timesteps(a)
            # copy over dummy past residuals (must be after setting timesteps)
a_ = dummy_past_residuals[:]
a_ = scheduler.step_prk(a , a , a , **a).prev_sample
a_ = new_scheduler.step_prk(a , a , a , **a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
a_ = scheduler.step_plms(a , a , a , **a).prev_sample
a_ = new_scheduler.step_plms(a , a , a , **a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def _lowerCAmelCase ( self: Dict , **a: int) ->Any:
'''simple docstring'''
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config(**a)
a_ = scheduler_class(**a)
a_ = 10
a_ = self.dummy_model()
a_ = self.dummy_sample_deter
scheduler.set_timesteps(a)
for i, t in enumerate(scheduler.prk_timesteps):
a_ = model(a , a)
a_ = scheduler.step_prk(a , a , a).prev_sample
for i, t in enumerate(scheduler.plms_timesteps):
a_ = model(a , a)
a_ = scheduler.step_plms(a , a , a).prev_sample
return sample
def _lowerCAmelCase ( self: int) ->int:
'''simple docstring'''
a_ = dict(self.forward_default_kwargs)
a_ = kwargs.pop("num_inference_steps" , a)
for scheduler_class in self.scheduler_classes:
a_ = self.get_scheduler_config()
a_ = scheduler_class(**a)
a_ = self.dummy_sample
a_ = 0.1 * sample
if num_inference_steps is not None and hasattr(a , "set_timesteps"):
scheduler.set_timesteps(a)
elif num_inference_steps is not None and not hasattr(a , "set_timesteps"):
a_ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
a_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
a_ = dummy_past_residuals[:]
a_ = scheduler.step_prk(a , 0 , a , **a).prev_sample
a_ = scheduler.step_prk(a , 1 , a , **a).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
a_ = scheduler.step_plms(a , 0 , a , **a).prev_sample
a_ = scheduler.step_plms(a , 1 , a , **a).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
def _lowerCAmelCase ( self: Dict) ->List[Any]:
'''simple docstring'''
for timesteps in [1_00, 10_00]:
self.check_over_configs(num_train_timesteps=a)
def _lowerCAmelCase ( self: Optional[int]) ->List[Any]:
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=a)
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config(steps_offset=1)
a_ = scheduler_class(**a)
scheduler.set_timesteps(10)
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[9_01, 8_51, 8_51, 8_01, 8_01, 7_51, 7_51, 7_01, 7_01, 6_51, 6_51, 6_01, 6_01, 5_01, 4_01, 3_01, 2_01, 1_01, 1]) , )
def _lowerCAmelCase ( self: Tuple) ->Optional[Any]:
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02]):
self.check_over_configs(beta_start=a , beta_end=a)
def _lowerCAmelCase ( self: int) ->Tuple:
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=a)
def _lowerCAmelCase ( self: Optional[int]) ->List[Any]:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=a)
def _lowerCAmelCase ( self: Tuple) ->Optional[Any]:
'''simple docstring'''
for t in [1, 5, 10]:
self.check_over_forward(time_step=a)
def _lowerCAmelCase ( self: str) ->List[str]:
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00]):
self.check_over_forward(num_inference_steps=a)
def _lowerCAmelCase ( self: Dict) ->Union[str, Any]:
'''simple docstring'''
a_ = 27
for scheduler_class in self.scheduler_classes:
a_ = self.dummy_sample
a_ = 0.1 * sample
a_ = self.get_scheduler_config()
a_ = scheduler_class(**a)
scheduler.set_timesteps(a)
            # before the power-of-3 fix this would error on the first step, so two steps suffice
for i, t in enumerate(scheduler.prk_timesteps[:2]):
a_ = scheduler.step_prk(a , a , a).prev_sample
def _lowerCAmelCase ( self: Optional[Any]) ->Dict:
'''simple docstring'''
with self.assertRaises(a):
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config()
a_ = scheduler_class(**a)
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample).prev_sample
def _lowerCAmelCase ( self: Optional[int]) ->Union[str, Any]:
'''simple docstring'''
a_ = self.full_loop()
a_ = torch.sum(torch.abs(a))
a_ = torch.mean(torch.abs(a))
assert abs(result_sum.item() - 198.1318) < 1e-2
assert abs(result_mean.item() - 0.2580) < 1e-3
def _lowerCAmelCase ( self: Optional[int]) ->int:
'''simple docstring'''
a_ = self.full_loop(prediction_type="v_prediction")
a_ = torch.sum(torch.abs(a))
a_ = torch.mean(torch.abs(a))
assert abs(result_sum.item() - 67.3986) < 1e-2
assert abs(result_mean.item() - 0.0878) < 1e-3
def _lowerCAmelCase ( self: int) ->Optional[Any]:
'''simple docstring'''
a_ = self.full_loop(set_alpha_to_one=a , beta_start=0.01)
a_ = torch.sum(torch.abs(a))
a_ = torch.mean(torch.abs(a))
assert abs(result_sum.item() - 230.0399) < 1e-2
assert abs(result_mean.item() - 0.2995) < 1e-3
def _lowerCAmelCase ( self: List[str]) ->Any:
'''simple docstring'''
a_ = self.full_loop(set_alpha_to_one=a , beta_start=0.01)
a_ = torch.sum(torch.abs(a))
a_ = torch.mean(torch.abs(a))
assert abs(result_sum.item() - 186.9482) < 1e-2
assert abs(result_mean.item() - 0.2434) < 1e-3
| 685 | 1 |
'''simple docstring'''
def __UpperCAmelCase (lowercase__ = 1000 ) -> int:
'''simple docstring'''
a_ = -1
a_ = 0
for a in range(1 ,n // 3 ):
# Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
a_ = (n * n - 2 * a * n) // (2 * n - 2 * a)
a_ = n - a - b
if c * c == (a * a + b * b):
a_ = a * b * c
if candidate >= product:
a_ = candidate
return product
if __name__ == "__main__":
print(F'{solution() = }')
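# Worked check (hedged): for n = 1000 the only Pythagorean triplet with
# a + b + c = 1000 is (200, 375, 425), so solution() returns
# 200 * 375 * 425 = 31875000.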
| 685 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def _lowerCAmelCase ( self: Optional[int]) ->Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
def _lowerCAmelCase ( self: str) ->Optional[int]:
'''simple docstring'''
a_ , a_ = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-canny" , from_pt=a , dtype=jnp.bfloataa)
a_ , a_ = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , controlnet=a , from_pt=a , dtype=jnp.bfloataa)
a_ = controlnet_params
a_ = "bird"
a_ = jax.device_count()
a_ = pipe.prepare_text_inputs([prompts] * num_samples)
a_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png")
a_ = pipe.prepare_image_inputs([canny_image] * num_samples)
a_ = jax.random.PRNGKey(0)
a_ = jax.random.split(a , jax.device_count())
a_ = replicate(a)
a_ = shard(a)
a_ = shard(a)
a_ = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=50 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
a_ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
a_ = images[0, 2_53:2_56, 2_53:2_56, -1]
a_ = jnp.asarray(jax.device_get(image_slice.flatten()))
a_ = jnp.array(
[0.16_7969, 0.11_6699, 0.08_1543, 0.15_4297, 0.13_2812, 0.10_8887, 0.16_9922, 0.16_9922, 0.20_5078])
print(f"""output_slice: {output_slice}""")
assert jnp.abs(output_slice - expected_slice).max() < 1e-2
def _lowerCAmelCase ( self: Union[str, Any]) ->str:
'''simple docstring'''
a_ , a_ = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-openpose" , from_pt=a , dtype=jnp.bfloataa)
a_ , a_ = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , controlnet=a , from_pt=a , dtype=jnp.bfloataa)
a_ = controlnet_params
a_ = "Chef in the kitchen"
a_ = jax.device_count()
a_ = pipe.prepare_text_inputs([prompts] * num_samples)
a_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png")
a_ = pipe.prepare_image_inputs([pose_image] * num_samples)
a_ = jax.random.PRNGKey(0)
a_ = jax.random.split(a , jax.device_count())
a_ = replicate(a)
a_ = shard(a)
a_ = shard(a)
a_ = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=50 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
a_ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
a_ = images[0, 2_53:2_56, 2_53:2_56, -1]
a_ = jnp.asarray(jax.device_get(image_slice.flatten()))
a_ = jnp.array(
[[0.27_1484, 0.26_1719, 0.27_5391, 0.27_7344, 0.27_9297, 0.29_1016, 0.29_4922, 0.30_2734, 0.30_2734]])
print(f"""output_slice: {output_slice}""")
assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 685 | 1 |
'''simple docstring'''
import colorsys
from PIL import Image # type: ignore
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ) -> float:
'''simple docstring'''
a_ = x
a_ = y
for step in range(lowercase__ ): # noqa: B007
a_ = a * a - b * b + x
a_ = 2 * a * b + y
a_ = a_new
        # divergence is guaranteed once the absolute value exceeds 2
        # (checked here as squared magnitude a*a + b*b > 4)
if a * a + b * b > 4:
break
return step / (max_step - 1)
def __UpperCAmelCase (lowercase__ ) -> tuple:
'''simple docstring'''
if distance == 1:
return (0, 0, 0)
else:
return (255, 255, 255)
def __UpperCAmelCase (lowercase__ ) -> tuple:
'''simple docstring'''
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(lowercase__ ,1 ,1 ) )
def __UpperCAmelCase (lowercase__ = 800 ,lowercase__ = 600 ,lowercase__ = -0.6 ,lowercase__ = 0 ,lowercase__ = 3.2 ,lowercase__ = 50 ,lowercase__ = True ,) -> Image.Image:
'''simple docstring'''
a_ = Image.new("RGB" ,(image_width, image_height) )
a_ = img.load()
# loop through the image-coordinates
for image_x in range(lowercase__ ):
for image_y in range(lowercase__ ):
# determine the figure-coordinates based on the image-coordinates
a_ = figure_width / image_width * image_height
a_ = figure_center_x + (image_x / image_width - 0.5) * figure_width
a_ = figure_center_y + (image_y / image_height - 0.5) * figure_height
a_ = get_distance(lowercase__ ,lowercase__ ,lowercase__ )
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
a_ = get_color_coded_rgb(lowercase__ )
else:
a_ = get_black_and_white_rgb(lowercase__ )
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
a_ = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
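# Small hedged example: a quick low-resolution render without blocking on img.show()
# (the output filename is hypothetical):
#
#   thumb = get_image(image_width=80, image_height=60, max_step=30)
#   thumb.save("mandelbrot_thumb.png")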
| 685 |
'''simple docstring'''
def __UpperCAmelCase (lowercase__ = 1000 ) -> int:
'''simple docstring'''
return sum(e for e in range(3 ,lowercase__ ) if e % 3 == 0 or e % 5 == 0 )
if __name__ == "__main__":
print(F'{solution() = }')
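# Worked check (hedged): below 10 the multiples of 3 or 5 are 3, 5, 6 and 9, so
# solution(10) == 23.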
| 685 | 1 |
'''simple docstring'''
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
a_ = '%20'.join(argv[1:]) if len(argv) > 1 else quote(str(input('Search: ')))
print('Googling.....')
a_ = F'https://www.google.com/search?q={query}&num=100'
a_ = requests.get(
url,
headers={'User-Agent': str(UserAgent().random)},
)
try:
a_ = (
BeautifulSoup(res.text, 'html.parser')
.find('div', attrs={'class': 'yuRUbf'})
.find('a')
.get('href')
)
except AttributeError:
a_ = parse_qs(
BeautifulSoup(res.text, 'html.parser')
.find('div', attrs={'class': 'kCrYT'})
.find('a')
.get('href')
)['url'][0]
webbrowser.open(link)
| 685 |
'''simple docstring'''
import math
def __UpperCAmelCase (lowercase__ ) -> list:
'''simple docstring'''
a_ = [True] * n
a_ = False
a_ = False
a_ = True
for i in range(3 ,int(n**0.5 + 1 ) ,2 ):
a_ = i * 2
while index < n:
a_ = False
a_ = index + i
a_ = [2]
for i in range(3 ,lowercase__ ,2 ):
if is_prime[i]:
primes.append(lowercase__ )
return primes
def __UpperCAmelCase (lowercase__ = 999966663333 ) -> int:
'''simple docstring'''
a_ = math.floor(math.sqrt(lowercase__ ) ) + 100
a_ = prime_sieve(lowercase__ )
a_ = 0
a_ = 0
a_ = primes[prime_index]
while (last_prime**2) <= limit:
a_ = primes[prime_index + 1]
a_ = last_prime**2
a_ = next_prime**2
# Get numbers divisible by lps(current)
a_ = lower_bound + last_prime
while upper_bound > current <= limit:
matches_sum += current
current += last_prime
# Reset the upper_bound
while (upper_bound - next_prime) > limit:
upper_bound -= next_prime
# Add the numbers divisible by ups(current)
a_ = upper_bound - next_prime
while current > lower_bound:
matches_sum += current
current -= next_prime
# Remove the numbers divisible by both ups and lps
a_ = 0
while upper_bound > current <= limit:
if current <= lower_bound:
# Increment the current number
current += last_prime * next_prime
continue
if current > limit:
break
# Remove twice since it was added by both ups and lps
matches_sum -= current * 2
# Increment the current number
current += last_prime * next_prime
# Setup for next pair
a_ = next_prime
prime_index += 1
return matches_sum
if __name__ == "__main__":
print(solution())
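# Background for the loop above (hedged, Project Euler 234): with lps(n) the largest
# prime whose square is <= n and ups(n) the smallest prime whose square is >= n, a
# number n is "semidivisible" when exactly one of lps(n) and ups(n) divides it. For
# each consecutive prime pair (p, q) the code sums the multiples of p and of q inside
# the interval (p**2, q**2), then subtracts twice the multiples of p*q, which were
# counted by both passes.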
| 685 | 1 |
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def __UpperCAmelCase (lowercase__ ,lowercase__=0.999 ,lowercase__="cosine" ,) -> Tuple:
'''simple docstring'''
if alpha_transform_type == "cosine":
def alpha_bar_fn(lowercase__ ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(lowercase__ ):
return math.exp(t * -12.0 )
else:
raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
a_ = []
for i in range(lowercase__ ):
a_ = i / num_diffusion_timesteps
a_ = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(lowercase__ ) / alpha_bar_fn(lowercase__ ) ,lowercase__ ) )
return torch.tensor(lowercase__ ,dtype=torch.floataa )
class SCREAMING_SNAKE_CASE__ ( lowercase_ , lowercase_ ):
_UpperCAmelCase =[e.name for e in KarrasDiffusionSchedulers]
_UpperCAmelCase =2
@register_to_config
def __init__( self: Union[str, Any] , a: int = 10_00 , a: float = 0.0_0085 , a: float = 0.012 , a: str = "linear" , a: Optional[Union[np.ndarray, List[float]]] = None , a: str = "epsilon" , a: str = "linspace" , a: int = 0 , ) ->Optional[int]:
'''simple docstring'''
if trained_betas is not None:
a_ = torch.tensor(a , dtype=torch.floataa)
elif beta_schedule == "linear":
a_ = torch.linspace(a , a , a , dtype=torch.floataa)
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
a_ = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , a , dtype=torch.floataa) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
a_ = betas_for_alpha_bar(a)
else:
raise NotImplementedError(f"""{beta_schedule} does is not implemented for {self.__class__}""")
a_ = 1.0 - self.betas
a_ = torch.cumprod(self.alphas , dim=0)
# set all values
self.set_timesteps(a , a , a)
def _lowerCAmelCase ( self: Union[str, Any] , a: Any , a: Tuple=None) ->Dict:
'''simple docstring'''
if schedule_timesteps is None:
a_ = self.timesteps
a_ = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter) == 0:
a_ = 1 if len(a) > 1 else 0
else:
a_ = timestep.cpu().item() if torch.is_tensor(a) else timestep
a_ = self._index_counter[timestep_int]
return indices[pos].item()
@property
def _lowerCAmelCase ( self: int) ->List[Any]:
'''simple docstring'''
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def _lowerCAmelCase ( self: List[str] , a: torch.FloatTensor , a: Union[float, torch.FloatTensor] , ) ->torch.FloatTensor:
'''simple docstring'''
a_ = self.index_for_timestep(a)
if self.state_in_first_order:
a_ = self.sigmas[step_index]
else:
a_ = self.sigmas_interpol[step_index]
a_ = sample / ((sigma**2 + 1) ** 0.5)
return sample
def _lowerCAmelCase ( self: Dict , a: int , a: Union[str, torch.device] = None , a: Optional[int] = None , ) ->Dict:
'''simple docstring'''
a_ = num_inference_steps
a_ = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
a_ = np.linspace(0 , num_train_timesteps - 1 , a , dtype=a)[::-1].copy()
elif self.config.timestep_spacing == "leading":
a_ = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
a_ = (np.arange(0 , a) * step_ratio).round()[::-1].copy().astype(a)
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
a_ = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
a_ = (np.arange(a , 0 , -step_ratio)).round().copy().astype(a)
timesteps -= 1
else:
raise ValueError(
f"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""")
a_ = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
a_ = torch.from_numpy(np.log(a)).to(a)
a_ = np.interp(a , np.arange(0 , len(a)) , a)
a_ = np.concatenate([sigmas, [0.0]]).astype(np.floataa)
a_ = torch.from_numpy(a).to(device=a)
# interpolate sigmas
a_ = sigmas.log().lerp(sigmas.roll(1).log() , 0.5).exp()
a_ = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
a_ = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]])
if str(a).startswith("mps"):
# mps does not support float64
a_ = torch.from_numpy(a).to(a , dtype=torch.floataa)
else:
a_ = torch.from_numpy(a).to(a)
# interpolate timesteps
a_ = self.sigma_to_t(a).to(a , dtype=timesteps.dtype)
a_ = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1).flatten()
a_ = torch.cat([timesteps[:1], interleaved_timesteps])
a_ = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
a_ = defaultdict(a)
def _lowerCAmelCase ( self: List[Any] , a: Dict) ->str:
'''simple docstring'''
a_ = sigma.log()
# get distribution
a_ = log_sigma - self.log_sigmas[:, None]
# get sigmas range
a_ = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
a_ = low_idx + 1
a_ = self.log_sigmas[low_idx]
a_ = self.log_sigmas[high_idx]
# interpolate sigmas
a_ = (low - log_sigma) / (low - high)
a_ = w.clamp(0 , 1)
# transform interpolation to time range
a_ = (1 - w) * low_idx + w * high_idx
a_ = t.view(sigma.shape)
return t
@property
def _lowerCAmelCase ( self: Any) ->Tuple:
'''simple docstring'''
return self.sample is None
def _lowerCAmelCase ( self: Any , a: Union[torch.FloatTensor, np.ndarray] , a: Union[float, torch.FloatTensor] , a: Union[torch.FloatTensor, np.ndarray] , a: bool = True , ) ->Union[SchedulerOutput, Tuple]:
'''simple docstring'''
a_ = self.index_for_timestep(a)
# advance index counter by 1
a_ = timestep.cpu().item() if torch.is_tensor(a) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
a_ = self.sigmas[step_index]
a_ = self.sigmas_interpol[step_index + 1]
a_ = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
a_ = self.sigmas[step_index - 1]
a_ = self.sigmas_interpol[step_index]
a_ = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyway.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
a_ = 0
a_ = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
a_ = sigma_hat if self.state_in_first_order else sigma_interpol
a_ = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
a_ = sigma_hat if self.state_in_first_order else sigma_interpol
a_ = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError("prediction_type not implemented yet: sample")
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""")
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
a_ = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
a_ = sigma_interpol - sigma_hat
# store for 2nd order step
a_ = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
a_ = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
a_ = sigma_next - sigma_hat
a_ = self.sample
a_ = None
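# explicit Euler step in sigma space: sample <- sample + (d sample / d sigma) * d sigma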
a_ = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=a)
def _lowerCAmelCase ( self: Tuple , a: torch.FloatTensor , a: torch.FloatTensor , a: torch.FloatTensor , ) ->torch.FloatTensor:
'''simple docstring'''
a_ = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype)
if original_samples.device.type == "mps" and torch.is_floating_point(a):
# mps does not support float64
a_ = self.timesteps.to(original_samples.device , dtype=torch.floataa)
a_ = timesteps.to(original_samples.device , dtype=torch.floataa)
else:
a_ = self.timesteps.to(original_samples.device)
a_ = timesteps.to(original_samples.device)
a_ = [self.index_for_timestep(a , a) for t in timesteps]
a_ = sigmas[step_indices].flatten()
while len(sigma.shape) < len(original_samples.shape):
a_ = sigma.unsqueeze(-1)
a_ = original_samples + noise * sigma
return noisy_samples
def __len__( self: int) ->Optional[Any]:
'''simple docstring'''
return self.config.num_train_timesteps
| 685 |
'''simple docstring'''
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ) -> Any:
'''simple docstring'''
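# copy the projector and post-net linear weights from the S3PRL downstream head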
a_ = UniSpeechSatForSequenceClassification.from_pretrained(lowercase__ ,config=lowercase__ )
a_ = downstream_dict["projector.weight"]
a_ = downstream_dict["projector.bias"]
a_ = downstream_dict["model.post_net.linear.weight"]
a_ = downstream_dict["model.post_net.linear.bias"]
return model
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ) -> Dict:
'''simple docstring'''
a_ = UniSpeechSatForAudioFrameClassification.from_pretrained(lowercase__ ,config=lowercase__ )
a_ = downstream_dict["model.linear.weight"]
a_ = downstream_dict["model.linear.bias"]
return model
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ) -> Optional[Any]:
'''simple docstring'''
a_ = UniSpeechSatForXVector.from_pretrained(lowercase__ ,config=lowercase__ )
a_ = downstream_dict["connector.weight"]
a_ = downstream_dict["connector.bias"]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
a_ = downstream_dict[
F"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
]
a_ = downstream_dict[F"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]
a_ = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
a_ = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
a_ = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
a_ = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
a_ = downstream_dict["objective.W"]
return model
@torch.no_grad()
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ) -> List[str]:
'''simple docstring'''
a_ = torch.load(lowercase__ ,map_location="cpu" )
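# S3PRL checkpoints keep the task-specific head under the "Downstream" key; layer
# weights live under "Featurizer" when weighted layer sum is used (see below)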
a_ = checkpoint["Downstream"]
a_ = UniSpeechSatConfig.from_pretrained(lowercase__ )
a_ = WavaVecaFeatureExtractor.from_pretrained(
lowercase__ ,return_attention_mask=lowercase__ ,do_normalize=lowercase__ )
a_ = hf_config.architectures[0]
if arch.endswith("ForSequenceClassification" ):
a_ = convert_classification(lowercase__ ,lowercase__ ,lowercase__ )
elif arch.endswith("ForAudioFrameClassification" ):
a_ = convert_diarization(lowercase__ ,lowercase__ ,lowercase__ )
elif arch.endswith("ForXVector" ):
a_ = convert_xvector(lowercase__ ,lowercase__ ,lowercase__ )
else:
raise NotImplementedError(F"""S3PRL weights conversion is not supported for {arch}""" )
if hf_config.use_weighted_layer_sum:
a_ = checkpoint["Featurizer"]["weights"]
hf_feature_extractor.save_pretrained(lowercase__ )
hf_model.save_pretrained(lowercase__ )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
a_ = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 685 | 1 |
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
a_ = re.compile(r'\s+')
def __UpperCAmelCase (lowercase__ ) -> Dict:
'''simple docstring'''
return {"hash": hashlib.mda(re.sub(lowercase__ ,"" ,example["content"] ).encode("utf-8" ) ).hexdigest()}
def __UpperCAmelCase (lowercase__ ) -> int:
'''simple docstring'''
a_ = [len(line) for line in example["content"].splitlines()]
return {"line_mean": np.mean(lowercase__ ), "line_max": max(lowercase__ )}
def __UpperCAmelCase (lowercase__ ) -> Optional[int]:
'''simple docstring'''
a_ = np.mean([c.isalnum() for c in example["content"]] )
return {"alpha_frac": alpha_frac}
def __UpperCAmelCase (lowercase__ ,lowercase__ ) -> Dict:
'''simple docstring'''
if example["hash"] in uniques:
uniques.remove(example["hash"] )
return True
else:
return False
def __UpperCAmelCase (lowercase__ ,lowercase__=5 ) -> List[Any]:
'''simple docstring'''
a_ = ["auto-generated", "autogenerated", "automatically generated"]
a_ = example["content"].splitlines()
for _, line in zip(range(lowercase__ ) ,lines ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def __UpperCAmelCase (lowercase__ ,lowercase__=5 ,lowercase__=0.05 ) -> Optional[Any]:
'''simple docstring'''
a_ = ["unit tests", "test file", "configuration file"]
a_ = example["content"].splitlines()
a_ = 0
a_ = 0
# first test
for _, line in zip(range(lowercase__ ) ,lines ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
a_ = example["content"].count("\n" )
a_ = int(coeff * nlines )
for line in lines:
count_config += line.lower().count("config" )
count_test += line.lower().count("test" )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def __UpperCAmelCase (lowercase__ ) -> List[Any]:
'''simple docstring'''
a_ = ["def ", "class ", "for ", "while "]
a_ = example["content"].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def __UpperCAmelCase (lowercase__ ,lowercase__=4 ) -> Dict:
'''simple docstring'''
a_ = example["content"].splitlines()
a_ = 0
for line in lines:
counter += line.lower().count("=" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def __UpperCAmelCase (lowercase__ ) -> Optional[Any]:
'''simple docstring'''
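# characters per token: a low ratio suggests content the tokenizer handles poorly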
a_ = tokenizer(example["content"] ,truncation=lowercase__ )["input_ids"]
a_ = len(example["content"] ) / len(lowercase__ )
return {"ratio": ratio}
def __UpperCAmelCase (lowercase__ ) -> List[str]:
'''simple docstring'''
a_ = {}
results.update(get_hash(lowercase__ ) )
results.update(line_stats(lowercase__ ) )
results.update(alpha_stats(lowercase__ ) )
results.update(char_token_ratio(lowercase__ ) )
results.update(is_autogenerated(lowercase__ ) )
results.update(is_config_or_test(lowercase__ ) )
results.update(has_no_keywords(lowercase__ ) )
results.update(has_few_assignments(lowercase__ ) )
return results
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ) -> Optional[Any]:
'''simple docstring'''
if not check_uniques(lowercase__ ,lowercase__ ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def __UpperCAmelCase (lowercase__ ) -> Tuple:
'''simple docstring'''
with open(lowercase__ ,"rb" ) as f_in:
with gzip.open(str(lowercase__ ) + ".gz" ,"wb" ,compresslevel=6 ) as f_out:
shutil.copyfileobj(lowercase__ ,lowercase__ )
os.unlink(lowercase__ )
# Settings
a_ = HfArgumentParser(PreprocessingArguments)
a_ = parser.parse_args()
if args.num_workers is None:
a_ = multiprocessing.cpu_count()
a_ = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
a_ = time.time()
a_ = load_dataset(args.dataset_name, split='train')
print(F'Time to load dataset: {time.time()-t_start:.2f}')
# Run preprocessing
a_ = time.time()
a_ = ds.map(preprocess, num_proc=args.num_workers)
print(F'Time to preprocess dataset: {time.time()-t_start:.2f}')
# Deduplicate hashes
a_ = set(ds.unique('hash'))
a_ = len(uniques) / len(ds)
print(F'Fraction of duplicates: {1-frac:.2%}')
# Deduplicate data and apply heuristics
a_ = time.time()
a_ = ds.filter(filter, fn_kwargs={'uniques': uniques, 'args': args})
print(F'Time to filter dataset: {time.time()-t_start:.2f}')
print(F'Size of filtered dataset: {len(ds_filter)}')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
a_ = time.time()
a_ , a_ = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(F'Time to deduplicate dataset: {time.time()-t_start:.2f}')
print(F'Size of deduplicate dataset: {len(ds_filter)}')
# Save data in batches of samples_per_file
a_ = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
with open(output_dir / 'duplicate_clusters.json', 'w') as f:
json.dump(duplicate_clusters, f)
a_ = output_dir / 'data'
data_dir.mkdir(exist_ok=True)
a_ = time.time()
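# shard the filtered dataset into JSON files of samples_per_file rows, gzip-compressing each shard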
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
a_ = str(data_dir / F'file-{file_number+1:012}.json')
a_ = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(F'Time to save dataset: {time.time()-t_start:.2f}')
| 685 |
'''simple docstring'''
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
from .unet_ad import UNetaDModel
from .unet_ad_condition import UNetaDConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL
| 685 | 1 |
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase , lowercase_ ):
def _lowerCAmelCase ( self: str) ->Optional[int]:
'''simple docstring'''
a_ = load_tool("text-classification")
self.tool.setup()
a_ = load_tool("text-classification" , remote=a)
def _lowerCAmelCase ( self: Any) ->List[str]:
'''simple docstring'''
a_ = self.tool("That's quite cool" , ["positive", "negative"])
self.assertEqual(a , "positive")
def _lowerCAmelCase ( self: Union[str, Any]) ->Tuple:
'''simple docstring'''
a_ = self.remote_tool("That's quite cool" , ["positive", "negative"])
self.assertEqual(a , "positive")
def _lowerCAmelCase ( self: int) ->Any:
'''simple docstring'''
a_ = self.tool(text="That's quite cool" , labels=["positive", "negative"])
self.assertEqual(a , "positive")
def _lowerCAmelCase ( self: Dict) ->List[Any]:
'''simple docstring'''
a_ = self.remote_tool(text="That's quite cool" , labels=["positive", "negative"])
self.assertEqual(a , "positive")
| 685 |
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
a_ = logging.get_logger(__name__)
a_ = {'vocab_file': 'vocab.txt', 'emoji_file': 'emoji.json'}
a_ = {
'vocab_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt',
},
'emoji_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json',
},
}
a_ = {
'abeja/gpt-neox-japanese-2.7b': 2_048,
}
def __UpperCAmelCase (lowercase__ ,lowercase__ ) -> Tuple:
'''simple docstring'''
with open(lowercase__ ,"r" ,encoding="utf-8" ) as f:
a_ = json.loads(f.read() )
a_ = collections.OrderedDict()
a_ = collections.OrderedDict()
a_ = collections.OrderedDict()
with open(lowercase__ ,"r" ,encoding="utf-8" ) as f:
a_ = f.readlines()
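# a vocab line may hold several comma-separated surface forms sharing one id; a line
# that is just "," keeps the literal comma as its only token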
a_ = [[t.rstrip("\n" )] if (t == "," or "," not in t) else t.rstrip("\n" ).split("," ) for t in token]
for idx, b in enumerate(lowercase__ ):
a_ = b
a_ = idx
for wd in b:
a_ = idx
return vocab, raw_vocab, ids_to_tokens, emoji
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_UpperCAmelCase =VOCAB_FILES_NAMES
_UpperCAmelCase =PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase =['''input_ids''', '''attention_mask''']
def __init__( self: List[str] , a: Union[str, Any] , a: Optional[int] , a: List[str]="<|endoftext|>" , a: Union[str, Any]="<|endoftext|>" , a: Dict="<|startoftext|>" , a: Dict="<|endoftext|>" , a: Union[str, Any]=False , **a: Optional[int] , ) ->str:
'''simple docstring'''
super().__init__(
unk_token=a , pad_token=a , bos_token=a , eos_token=a , do_clean_text=a , **a , )
if not os.path.isfile(a):
raise ValueError(
f"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
" model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
if not os.path.isfile(a):
raise ValueError(
f"""Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
" pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
a_ = do_clean_text
a_ , a_ , a_ , a_ = load_vocab_and_emoji(a , a)
a_ = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji)
@property
def _lowerCAmelCase ( self: Optional[Any]) ->Optional[Any]:
'''simple docstring'''
return len(self.raw_vocab)
def _lowerCAmelCase ( self: Dict) ->Any:
'''simple docstring'''
return dict(self.raw_vocab , **self.added_tokens_encoder)
def _lowerCAmelCase ( self: Union[str, Any] , a: Any) ->Dict:
'''simple docstring'''
return self.subword_tokenizer.tokenize(a , clean=self.do_clean_text)
def _lowerCAmelCase ( self: int , a: List[Any]) ->Union[str, Any]:
'''simple docstring'''
return self.vocab.get(a , self.vocab.get(self.unk_token))
def _lowerCAmelCase ( self: Optional[Any] , a: Optional[int]) ->str:
'''simple docstring'''
return self.subword_tokenizer.convert_id_to_token(a)
def _lowerCAmelCase ( self: Optional[int] , a: Any) ->str:
'''simple docstring'''
a_ = "".join(a).strip()
return out_string
def _lowerCAmelCase ( self: Any , a: "Conversation") ->List[int]:
'''simple docstring'''
a_ = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(a , add_special_tokens=a) + [self.eos_token_id])
if len(a) > self.model_max_length:
a_ = input_ids[-self.model_max_length :]
return input_ids
def _lowerCAmelCase ( self: int , a: str , a: Optional[str] = None) ->Tuple[str]:
'''simple docstring'''
a_ = 0
if os.path.isdir(a):
a_ = os.path.join(
a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
a_ = os.path.join(
a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"])
else:
a_ = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
)
a_ = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
)
with open(a , "w" , encoding="utf-8") as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
" Please check that the vocabulary is not corrupted!")
a_ = token_index
writer.write(",".join(a) + "\n")
index += 1
with open(a , "w" , encoding="utf-8") as writer:
json.dump(self.emoji , a)
return vocab_file, emoji_file
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
def __init__( self: List[str] , a: Any , a: Union[str, Any] , a: Any) ->List[Any]:
'''simple docstring'''
a_ = vocab # same as swe
a_ = ids_to_tokens # same as bpe
a_ = emoji
a_ = np.max([len(w) for w in self.vocab.keys()])
a_ = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
a_ = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
a_ = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
a_ = re.compile(
r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
a_ = re.compile(
r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
a_ = re.compile(
r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*")
a_ = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
a_ = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
a_ = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})
def __len__( self: Dict) ->Any:
'''simple docstring'''
return len(self.ids_to_tokens)
def _lowerCAmelCase ( self: Union[str, Any] , a: Tuple) ->Any:
'''simple docstring'''
a_ = self.content_repattera.sub("<URL>" , a)
a_ = self.content_repattera.sub("<EMAIL>" , a)
a_ = self.content_repattera.sub("<TEL>" , a)
a_ = self.content_repattera.sub("<DATE>" , a)
a_ = self.content_repattera.sub("<DATE>" , a)
a_ = self.content_repattera.sub("<PRICE>" , a)
a_ = content.translate(self.content_transa)
while "<BLOCK><BLOCK>" in content:
a_ = content.replace("<BLOCK><BLOCK>" , "<BLOCK>")
return content
def _lowerCAmelCase ( self: Any , a: int , a: Optional[int]=False) ->List[str]:
'''simple docstring'''
a_ = text.replace(" " , "<SP>")
a_ = text.replace(" " , "<SP>")
a_ = text.replace("\r\n" , "<BR>")
a_ = text.replace("\n" , "<BR>")
a_ = text.replace("\r" , "<BR>")
a_ = text.replace("\t" , "<TAB>")
a_ = text.replace("—" , "ー")
a_ = text.replace("−" , "ー")
for k, v in self.emoji["emoji"].items():
if k in text:
a_ = text.replace(a , a)
if clean:
a_ = self.clean_text(a)
def check_simbol(a: Dict):
a_ = x.encode()
if len(x) == 1 and len(e) == 2:
a_ = (int(e[0]) << 8) + int(e[1])
if (
(c >= 0XC_2_A_1 and c <= 0XC_2_B_F)
or (c >= 0XC_7_8_0 and c <= 0XC_7_8_3)
or (c >= 0XC_A_B_9 and c <= 0XC_B_B_F)
or (c >= 0XC_C_8_0 and c <= 0XC_D_A_2)
):
return True
return False
def checkuae(a: str):
a_ = x.encode()
if len(x) == 1 and len(e) == 3:
a_ = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
if c >= 0XE_2_8_0_8_0 and c <= 0XE_2_B_0_7_F:
return True
return False
a_ = 0
a_ = []
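# greedy longest-match-first scan over the text; candidate vocab matches are
# collected per position and resolved below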
while pos < len(a):
a_ = min(len(a) , pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
a_ = [] # (token_id, token, pos)
for e in range(a , a , -1):
a_ = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(a) > 2:
a_ = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e))
if len(a) > 0:
# the smallest token_id is adopted
a_ , a_ , a_ = sorted(a , key=lambda x: x[0])[0]
result.append(a)
a_ = e
else:
a_ = pos + 1
a_ = text[pos:end]
if check_simbol(a):
result.append("<KIGOU>")
elif checkuae(a):
result.append("<U2000U2BFF>")
else:
for i in wd.encode("utf-8"):
result.append("<|byte%d|>" % i)
a_ = end
return result
def _lowerCAmelCase ( self: int , a: List[Any] , a: Any="\n") ->str:
'''simple docstring'''
a_ = []
a_ = []
a_ = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2]))
else:
if len(a) > 0:
words.append(bytearray(a).decode("utf-8" , errors="replace"))
a_ = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["emoji_inv"][word])
elif word == "<SP>":
words.append(" ")
elif word == "<BR>":
words.append(a)
elif word == "<TAB>":
words.append("\t")
elif word == "<BLOCK>":
words.append("▀")
elif word == "<KIGOU>":
words.append("ǀ")
elif word == "<U2000U2BFF>":
words.append("‖")
else:
words.append(a)
if len(a) > 0:
words.append(bytearray(a).decode("utf-8" , errors="replace"))
a_ = "".join(a)
return text
| 685 | 1 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ) -> Any:
'''simple docstring'''
with open(lowercase__ ) as metadata_file:
a_ = json.load(lowercase__ )
a_ = LukeConfig(use_entity_aware_attention=lowercase__ ,**metadata["model_config"] )
# Load in the weights from the checkpoint_path
a_ = torch.load(lowercase__ ,map_location="cpu" )["module"]
# Load the entity vocab file
a_ = load_original_entity_vocab(lowercase__ )
# add an entry for [MASK2]
a_ = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
a_ = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
a_ = AddedToken("<ent>" ,lstrip=lowercase__ ,rstrip=lowercase__ )
a_ = AddedToken("<ent2>" ,lstrip=lowercase__ ,rstrip=lowercase__ )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(lowercase__ )
with open(os.path.join(lowercase__ ,"tokenizer_config.json" ) ,"r" ) as f:
a_ = json.load(lowercase__ )
a_ = "MLukeTokenizer"
with open(os.path.join(lowercase__ ,"tokenizer_config.json" ) ,"w" ) as f:
json.dump(lowercase__ ,lowercase__ )
with open(os.path.join(lowercase__ ,MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) ,"w" ) as f:
json.dump(lowercase__ ,lowercase__ )
a_ = MLukeTokenizer.from_pretrained(lowercase__ )
# Initialize the embeddings of the special tokens
a_ = tokenizer.convert_tokens_to_ids(["@"] )[0]
a_ = tokenizer.convert_tokens_to_ids(["#"] )[0]
a_ = state_dict["embeddings.word_embeddings.weight"]
a_ = word_emb[ent_init_index].unsqueeze(0 )
a_ = word_emb[enta_init_index].unsqueeze(0 )
a_ = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
a_ = state_dict[bias_name]
a_ = decoder_bias[ent_init_index].unsqueeze(0 )
a_ = decoder_bias[enta_init_index].unsqueeze(0 )
a_ = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
a_ = F"""encoder.layer.{layer_index}.attention.self."""
a_ = state_dict[prefix + matrix_name]
a_ = state_dict[prefix + matrix_name]
a_ = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
a_ = state_dict["entity_embeddings.entity_embeddings.weight"]
a_ = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
a_ = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
a_ = state_dict["entity_predictions.bias"]
a_ = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
a_ = torch.cat([entity_prediction_bias, entity_mask_bias] )
a_ = LukeForMaskedLM(config=lowercase__ ).eval()
state_dict.pop("entity_predictions.decoder.weight" )
state_dict.pop("lm_head.decoder.weight" )
state_dict.pop("lm_head.decoder.bias" )
a_ = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )):
a_ = state_dict[key]
else:
a_ = state_dict[key]
a_ , a_ = model.load_state_dict(lowercase__ ,strict=lowercase__ )
if set(lowercase__ ) != {"luke.embeddings.position_ids"}:
raise ValueError(F"""Unexpected unexpected_keys: {unexpected_keys}""" )
if set(lowercase__ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F"""Unexpected missing_keys: {missing_keys}""" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
a_ = MLukeTokenizer.from_pretrained(lowercase__ ,task="entity_classification" )
a_ = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
a_ = (0, 9)
a_ = tokenizer(lowercase__ ,entity_spans=[span] ,return_tensors="pt" )
a_ = model(**lowercase__ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
a_ = torch.Size((1, 33, 768) )
a_ = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] ,lowercase__ ,atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
a_ = torch.Size((1, 1, 768) )
a_ = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
F""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] ,lowercase__ ,atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
a_ = MLukeTokenizer.from_pretrained(lowercase__ )
a_ = "Tokyo is the capital of <mask>."
a_ = (24, 30)
a_ = tokenizer(lowercase__ ,entity_spans=[span] ,return_tensors="pt" )
a_ = model(**lowercase__ )
a_ = encoding["input_ids"][0].tolist()
a_ = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) )
a_ = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(lowercase__ )
a_ = outputs.entity_logits[0][0].argmax().item()
a_ = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(lowercase__ ) )
model.save_pretrained(lowercase__ )
def __UpperCAmelCase (lowercase__ ) -> Any:
'''simple docstring'''
a_ = ["[MASK]", "[PAD]", "[UNK]"]
a_ = [json.loads(line) for line in open(lowercase__ )]
a_ = {}
for entry in data:
a_ = entry["id"]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
a_ = entity_id
break
a_ = F"""{language}:{entity_name}"""
a_ = entity_id
return new_mapping
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
a_ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 685 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
def __init__( self: List[Any] , a: Optional[Any] , a: Dict=13 , a: List[str]=7 , a: Optional[Any]=True , a: int=True , a: Any=True , a: Optional[int]=True , a: int=True , a: Dict=False , a: Union[str, Any]=False , a: Dict=False , a: List[str]=2 , a: Union[str, Any]=99 , a: List[Any]=0 , a: Optional[int]=32 , a: List[str]=5 , a: int=4 , a: List[Any]=0.1 , a: Optional[int]=0.1 , a: Optional[int]=5_12 , a: str=12 , a: Dict=2 , a: Any=0.02 , a: Optional[int]=3 , a: str=4 , a: Optional[int]="last" , a: Tuple=None , a: Any=None , ) ->int:
'''simple docstring'''
a_ = parent
a_ = batch_size
a_ = seq_length
a_ = is_training
a_ = use_input_lengths
a_ = use_token_type_ids
a_ = use_labels
a_ = gelu_activation
a_ = sinusoidal_embeddings
a_ = causal
a_ = asm
a_ = n_langs
a_ = vocab_size
a_ = n_special
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = type_vocab_size
a_ = type_sequence_label_size
a_ = initializer_range
a_ = num_labels
a_ = num_choices
a_ = summary_type
a_ = use_proj
a_ = scope
def _lowerCAmelCase ( self: Tuple) ->Dict:
'''simple docstring'''
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ = random_attention_mask([self.batch_size, self.seq_length])
a_ = None
if self.use_input_lengths:
a_ = (
ids_tensor([self.batch_size] , vocab_size=2) + self.seq_length - 2
) # small variation of seq_length
a_ = None
if self.use_token_type_ids:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs)
a_ = None
a_ = None
a_ = None
if self.use_labels:
a_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a_ = ids_tensor([self.batch_size] , 2).float()
a_ = ids_tensor([self.batch_size] , self.num_choices)
a_ = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _lowerCAmelCase ( self: List[Any]) ->Any:
'''simple docstring'''
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def _lowerCAmelCase ( self: Optional[int] , a: Tuple , a: List[Any] , a: List[Any] , a: Optional[int] , a: int , a: str , a: Any , a: str , a: List[Any] , ) ->Union[str, Any]:
'''simple docstring'''
a_ = FlaubertModel(config=a)
model.to(a)
model.eval()
a_ = model(a , lengths=a , langs=a)
a_ = model(a , langs=a)
a_ = model(a)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _lowerCAmelCase ( self: Optional[int] , a: Optional[Any] , a: Dict , a: Union[str, Any] , a: Dict , a: Optional[Any] , a: Any , a: Tuple , a: str , a: List[str] , ) ->Dict:
'''simple docstring'''
a_ = FlaubertWithLMHeadModel(a)
model.to(a)
model.eval()
a_ = model(a , token_type_ids=a , labels=a)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _lowerCAmelCase ( self: Optional[int] , a: Tuple , a: Optional[Any] , a: List[Any] , a: List[str] , a: List[str] , a: List[str] , a: Optional[Any] , a: str , a: Union[str, Any] , ) ->str:
'''simple docstring'''
a_ = FlaubertForQuestionAnsweringSimple(a)
model.to(a)
model.eval()
a_ = model(a)
a_ = model(a , start_positions=a , end_positions=a)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def _lowerCAmelCase ( self: Union[str, Any] , a: List[str] , a: Tuple , a: Optional[Any] , a: Any , a: Dict , a: Any , a: Optional[int] , a: Optional[Any] , a: Union[str, Any] , ) ->int:
'''simple docstring'''
a_ = FlaubertForQuestionAnswering(a)
model.to(a)
model.eval()
a_ = model(a)
a_ = model(
a , start_positions=a , end_positions=a , cls_index=a , is_impossible=a , p_mask=a , )
a_ = model(
a , start_positions=a , end_positions=a , cls_index=a , is_impossible=a , )
((a_) , ) = result_with_labels.to_tuple()
a_ = model(a , start_positions=a , end_positions=a)
((a_) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , ())
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,))
def _lowerCAmelCase ( self: Union[str, Any] , a: List[str] , a: Tuple , a: Union[str, Any] , a: Any , a: Tuple , a: Union[str, Any] , a: int , a: int , a: Dict , ) ->Union[str, Any]:
'''simple docstring'''
a_ = FlaubertForSequenceClassification(a)
model.to(a)
model.eval()
a_ = model(a)
a_ = model(a , labels=a)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def _lowerCAmelCase ( self: str , a: List[str] , a: Dict , a: Tuple , a: Optional[Any] , a: Any , a: Any , a: str , a: str , a: Optional[Any] , ) ->List[Any]:
'''simple docstring'''
a_ = self.num_labels
a_ = FlaubertForTokenClassification(a)
model.to(a)
model.eval()
a_ = model(a , attention_mask=a , labels=a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def _lowerCAmelCase ( self: Dict , a: Tuple , a: List[Any] , a: Dict , a: Optional[Any] , a: Optional[Any] , a: Optional[Any] , a: Union[str, Any] , a: List[str] , a: Tuple , ) ->Dict:
'''simple docstring'''
a_ = self.num_choices
a_ = FlaubertForMultipleChoice(config=a)
model.to(a)
model.eval()
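# multiple-choice models expect inputs of shape (batch_size, num_choices, seq_length)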
a_ = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ = model(
a , attention_mask=a , token_type_ids=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def _lowerCAmelCase ( self: Any) ->List[Any]:
'''simple docstring'''
a_ = self.prepare_config_and_inputs()
(a_, a_, a_, a_, a_, a_, a_, a_, a_) = config_and_inputs
a_ = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"lengths": input_lengths,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( lowercase_ , lowercase_ , unittest.TestCase ):
_UpperCAmelCase =(
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
_UpperCAmelCase =(
{
'''feature-extraction''': FlaubertModel,
'''fill-mask''': FlaubertWithLMHeadModel,
'''question-answering''': FlaubertForQuestionAnsweringSimple,
'''text-classification''': FlaubertForSequenceClassification,
'''token-classification''': FlaubertForTokenClassification,
'''zero-shot''': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def _lowerCAmelCase ( self: Optional[Any] , a: List[Any] , a: Any , a: List[str] , a: Union[str, Any] , a: int) ->int:
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast")
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _lowerCAmelCase ( self: str , a: Optional[Any] , a: List[Any] , a: Tuple=False) ->List[Any]:
'''simple docstring'''
a_ = super()._prepare_for_class(a , a , return_labels=a)
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
a_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a)
a_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a)
return inputs_dict
def _lowerCAmelCase ( self: Dict) ->Union[str, Any]:
'''simple docstring'''
a_ = FlaubertModelTester(self)
a_ = ConfigTester(self , config_class=a , emb_dim=37)
def _lowerCAmelCase ( self: List[str]) ->Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self: List[str]) ->Optional[Any]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*a)
def _lowerCAmelCase ( self: int) ->Optional[int]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*a)
def _lowerCAmelCase ( self: Optional[int]) ->Optional[Any]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*a)
def _lowerCAmelCase ( self: Any) ->Optional[int]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*a)
def _lowerCAmelCase ( self: Optional[Any]) ->Tuple:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*a)
def _lowerCAmelCase ( self: Optional[Any]) ->Union[str, Any]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*a)
def _lowerCAmelCase ( self: List[Any]) ->Dict:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*a)
@slow
def _lowerCAmelCase ( self: Any) ->Any:
'''simple docstring'''
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = FlaubertModel.from_pretrained(a)
self.assertIsNotNone(a)
@slow
@require_torch_gpu
def _lowerCAmelCase ( self: int) ->Optional[int]:
'''simple docstring'''
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
a_ = True
a_ = model_class(config=a)
a_ = self._prepare_for_class(a , a)
a_ = torch.jit.trace(
a , (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu")))
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a , os.path.join(a , "traced_model.pt"))
a_ = torch.jit.load(os.path.join(a , "traced_model.pt") , map_location=a)
loaded(inputs_dict["input_ids"].to(a) , inputs_dict["attention_mask"].to(a))
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
def _lowerCAmelCase ( self: List[Any]) ->Optional[int]:
'''simple docstring'''
a_ = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
a_ = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]])
with torch.no_grad():
a_ = model(a)[0]
a_ = torch.Size((1, 11, 7_68))
self.assertEqual(output.shape , a)
a_ = torch.tensor(
[[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , a , atol=1e-4))
| 685 | 1 |
'''simple docstring'''
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def __UpperCAmelCase (lowercase__ ,lowercase__=() ,lowercase__=None ,lowercase__="no" ,lowercase__="29500" ) -> List[Any]:
'''simple docstring'''
a_ = False
a_ = False
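# detect hosted notebook environments (Kaggle, Colab) to pick the right launch path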
if any(key.startswith("KAGGLE" ) for key in os.environ.keys() ):
a_ = True
elif "IPython" in sys.modules:
a_ = "google.colab" in str(sys.modules["IPython"].get_ipython() )
try:
a_ = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
F"""Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.""" )
if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME" ,lowercase__ ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
"your training function. Restart your notebook and make sure no cells initializes an "
"`Accelerator`." )
if num_processes is None:
a_ = 8
a_ = PrepareForLaunch(lowercase__ ,distributed_type="TPU" )
print(F"""Launching a training on {num_processes} TPU cores.""" )
xmp.spawn(lowercase__ ,args=lowercase__ ,nprocs=lowercase__ ,start_method="fork" )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print("Launching training on one GPU." )
else:
print("Launching training on one CPU." )
function(*lowercase__ )
else:
if num_processes is None:
raise ValueError(
"You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call." )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
"inside your training function. Restart your notebook and make sure no cells initializes an "
"`Accelerator`." )
if torch.cuda.is_initialized():
raise ValueError(
"To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
"using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
"function." )
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
world_size=lowercase__ ,master_addr="127.0.01" ,master_port=lowercase__ ,mixed_precision=lowercase__ ):
a_ = PrepareForLaunch(lowercase__ ,distributed_type="MULTI_GPU" )
print(F"""Launching training on {num_processes} GPUs.""" )
try:
start_processes(lowercase__ ,args=lowercase__ ,nprocs=lowercase__ ,start_method="fork" )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
"CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
"This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
"Please review your imports and test them when running the `notebook_launcher()` to identify "
"which one is problematic." ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
a_ = "1"
print("Launching training on MPS." )
elif torch.cuda.is_available():
print("Launching training on one GPU." )
else:
print("Launching training on CPU." )
function(*lowercase__ )
def __UpperCAmelCase (lowercase__ ,lowercase__=() ,lowercase__=2 ) -> Optional[int]:
'''simple docstring'''
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
world_size=lowercase__ ,master_addr="127.0.01" ,master_port="29500" ,accelerate_mixed_precision="no" ,accelerate_debug_rdv_file=tmp_file.name ,accelerate_use_cpu="yes" ,):
a_ = PrepareForLaunch(lowercase__ ,debug=lowercase__ )
start_processes(lowercase__ ,args=lowercase__ ,nprocs=lowercase__ ,start_method="fork" )
| 685 |
'''simple docstring'''
import math
def __UpperCAmelCase (lowercase__ ) -> bool:
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# all prime numbers greater than 3 are of the form 6k +/- 1
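# proof sketch: any n can be written as 6k + r with r in {0, ..., 5}; r in {0, 2, 4}
# gives an even number and r == 3 a multiple of 3, so primes > 3 have r == 1 or r == 5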
for i in range(5 ,int(math.sqrt(lowercase__ ) + 1 ) ,6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __UpperCAmelCase (lowercase__ = 10001 ) -> int:
'''simple docstring'''
try:
a_ = int(lowercase__ )
except (TypeError, ValueError):
raise TypeError("Parameter nth must be int or castable to int." ) from None
if nth <= 0:
raise ValueError("Parameter nth must be greater than or equal to one." )
a_ = []
a_ = 2
while len(lowercase__ ) < nth:
if is_prime(lowercase__ ):
primes.append(lowercase__ )
num += 1
else:
num += 1
return primes[len(lowercase__ ) - 1]
if __name__ == "__main__":
print(F'{solution() = }')
| 685 | 1 |
'''simple docstring'''
import os
import jsonlines
import numpy as np
from tqdm import tqdm
a_ = 2_048
a_ = 4_096
a_ = 42
a_ = os.environ.pop('PROCESS_TRAIN', 'false')
a_ = {'null': 0, 'short': 1, 'long': 2, 'yes': 3, 'no': 4}
def __UpperCAmelCase (lowercase__ ) -> Dict:
'''simple docstring'''
def choose_first(lowercase__ ,lowercase__=False ):
assert isinstance(lowercase__ ,lowercase__ )
if len(lowercase__ ) == 1:
a_ = answer[0]
return {k: [answer[k]] for k in answer} if is_long_answer else answer
for a in answer:
if is_long_answer:
a_ = {k: [a[k]] for k in a}
if len(a["start_token"] ) > 0:
break
return a
a_ = {"id": example["id"]}
a_ = example["annotations"]
a_ = annotation["yes_no_answer"]
if 0 in yes_no_answer or 1 in yes_no_answer:
a_ = ["yes"] if 1 in yes_no_answer else ["no"]
a_ = a_ = []
a_ = a_ = []
a_ = ["<cls>"]
else:
a_ = ["short"]
a_ = choose_first(annotation["short_answers"] )
if len(out["start_token"] ) == 0:
# answer will be long if short is not available
a_ = ["long"]
a_ = choose_first(annotation["long_answer"] ,is_long_answer=lowercase__ )
a_ = []
answer.update(lowercase__ )
# disregard some samples
if len(answer["start_token"] ) > 1 or answer["start_token"] == answer["end_token"]:
a_ = True
else:
a_ = False
a_ = ["start_token", "end_token", "start_byte", "end_byte", "text"]
if not all(isinstance(answer[k] ,lowercase__ ) for k in cols ):
raise ValueError("Issue in ID" ,example["id"] )
return answer
def __UpperCAmelCase (lowercase__ ,lowercase__=False ) -> List[str]:
'''simple docstring'''
a_ = _get_single_answer(lowercase__ )
# bytes are of no use
del answer["start_byte"]
del answer["end_byte"]
# handle yes_no answers explicitly
if answer["category"][0] in ["yes", "no"]: # category is list with one element
a_ = example["document"]["tokens"]
a_ = []
for i in range(len(doc["token"] ) ):
if not doc["is_html"][i]:
context.append(doc["token"][i] )
return {
"context": " ".join(lowercase__ ),
"answer": {
"start_token": -100, # ignore index in cross-entropy
"end_token": -100, # ignore index in cross-entropy
"category": answer["category"],
"span": answer["category"], # extra
},
}
# later, help in removing all no answers
if answer["start_token"] == [-1]:
return {
"context": "None",
"answer": {
"start_token": -1,
"end_token": -1,
"category": "null",
"span": "None", # extra
},
}
# handling normal samples
a_ = ["start_token", "end_token"]
answer.update({k: answer[k][0] if len(answer[k] ) > 0 else answer[k] for k in cols} ) # e.g. [10] == 10
a_ = example["document"]["tokens"]
a_ = answer["start_token"]
a_ = answer["end_token"]
a_ = []
for i in range(len(doc["token"] ) ):
if not doc["is_html"][i]:
context.append(doc["token"][i] )
else:
if answer["start_token"] > i:
start_token -= 1
if answer["end_token"] > i:
end_token -= 1
a_ = " ".join(context[start_token:end_token] )
# checking above code
if assertion:
a_ = doc["is_html"][answer["start_token"] : answer["end_token"]]
a_ = doc["token"][answer["start_token"] : answer["end_token"]]
a_ = " ".join([old[i] for i in range(len(lowercase__ ) ) if not is_html[i]] )
if new != old:
print("ID:" ,example["id"] )
print("New:" ,lowercase__ ,end="\n" )
print("Old:" ,lowercase__ ,end="\n\n" )
return {
"context": " ".join(lowercase__ ),
"answer": {
"start_token": start_token,
"end_token": end_token - 1, # this makes it inclusive
"category": answer["category"], # either long or short
"span": new, # extra
},
}
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__=2048 ,lowercase__=4096 ,lowercase__=True ) -> List[str]:
'''simple docstring'''
a_ = get_context_and_ans(lowercase__ ,assertion=lowercase__ )
a_ = out["answer"]
# later, removing these samples
if answer["start_token"] == -1:
return {
"example_id": example["id"],
"input_ids": [[-1]],
"labels": {
"start_token": [-1],
"end_token": [-1],
"category": ["null"],
},
}
a_ = tokenizer(example["question"]["text"] ,out["context"] ).input_ids
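# tokens up to and including the first SEP form the question prefix reused in every window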
a_ = input_ids.index(tokenizer.sep_token_id ) + 1
# return yes/no
if answer["category"][0] in ["yes", "no"]: # category is list with one element
a_ = []
a_ = []
a_ = input_ids[:q_len]
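# stride over the context so consecutive windows overlap; each window is prefixed with the question tokens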
a_ = range(lowercase__ ,len(lowercase__ ) ,max_length - doc_stride )
for i in doc_start_indices:
a_ = i + max_length - q_len
a_ = input_ids[i:end_index]
inputs.append(q_indices + slice )
category.append(answer["category"][0] )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": [-100] * len(lowercase__ ),
"end_token": [-100] * len(lowercase__ ),
"category": category,
},
}
a_ = out["context"].split()
a_ = splitted_context[answer["end_token"]]
a_ = len(
tokenizer(
" ".join(splitted_context[: answer["start_token"]] ) ,add_special_tokens=lowercase__ ,).input_ids )
a_ = len(
tokenizer(" ".join(splitted_context[: answer["end_token"]] ) ,add_special_tokens=lowercase__ ).input_ids )
answer["start_token"] += q_len
answer["end_token"] += q_len
# fixing end token
a_ = len(tokenizer(lowercase__ ,add_special_tokens=lowercase__ ).input_ids )
if num_sub_tokens > 1:
answer["end_token"] += num_sub_tokens - 1
a_ = input_ids[answer["start_token"] : answer["end_token"] + 1] # right & left are inclusive
a_ = answer["start_token"]
a_ = answer["end_token"]
if assertion:
a_ = tokenizer.decode(lowercase__ )
if answer["span"] != new:
print("ISSUE IN TOKENIZATION" )
print("OLD:" ,answer["span"] )
print("NEW:" ,lowercase__ ,end="\n\n" )
if len(lowercase__ ) <= max_length:
return {
"example_id": example["id"],
"input_ids": [input_ids],
"labels": {
"start_token": [answer["start_token"]],
"end_token": [answer["end_token"]],
"category": answer["category"],
},
}
a_ = input_ids[:q_len]
a_ = range(lowercase__ ,len(lowercase__ ) ,max_length - doc_stride )
a_ = []
a_ = []
a_ = []
a_ = [] # null, yes, no, long, short
for i in doc_start_indices:
a_ = i + max_length - q_len
a_ = input_ids[i:end_index]
inputs.append(q_indices + slice )
assert len(inputs[-1] ) <= max_length, "Issue in truncating length"
if start_token >= i and end_token <= end_index - 1:
a_ = start_token - i + q_len
a_ = end_token - i + q_len
answers_category.append(answer["category"][0] ) # ["short"] -> "short"
else:
a_ = -100
a_ = -100
answers_category.append("null" )
a_ = inputs[-1][start_token : end_token + 1]
answers_start_token.append(start_token )
answers_end_token.append(end_token )
if assertion:
if new != old and new != [tokenizer.cls_token_id]:
print("ISSUE in strided for ID:" ,example["id"] )
print("New:" ,tokenizer.decode(lowercase__ ) )
print("Old:" ,tokenizer.decode(lowercase__ ) ,end="\n\n" )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": answers_start_token,
"end_token": answers_end_token,
"category": answers_category,
},
}
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__=2048 ,lowercase__=4096 ,lowercase__=False ) -> Optional[int]:
'''simple docstring'''
a_ = get_strided_contexts_and_ans(
lowercase__ ,lowercase__ ,doc_stride=lowercase__ ,max_length=lowercase__ ,assertion=lowercase__ ,)
return example
def __UpperCAmelCase (lowercase__ ,lowercase__ ) -> List[str]:
'''simple docstring'''
with jsonlines.open(lowercase__ ,"a" ) as writer:
for example in tqdm(lowercase__ ,total=len(lowercase__ ) ,desc="Saving samples ... " ):
a_ = example["labels"]
for ids, start, end, cat in zip(
example["input_ids"] ,labels["start_token"] ,labels["end_token"] ,labels["category"] ,):
            if start == -100 and end == -100:
                continue # skip windows that carry no answer (the labels use -100)
            if cat == "null" and np.random.rand() < 0.6:
                continue # randomly drop ~60% of the "null"-category windows
writer.write(
{
"input_ids": ids,
"start_token": start,
"end_token": end,
"category": CATEGORY_MAPPING[cat],
} )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
a_ = load_dataset('natural_questions')
a_ = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base')
a_ = data['train' if PROCESS_TRAIN == 'true' else 'validation']
a_ = {
'tokenizer': tokenizer,
'doc_stride': DOC_STRIDE,
'max_length': MAX_LENGTH,
'assertion': False,
}
a_ = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
a_ = data.remove_columns(['annotations', 'document', 'id', 'question'])
print(data)
np.random.seed(SEED)
a_ = 'nq-training.jsonl' if PROCESS_TRAIN == 'true' else 'nq-validation.jsonl'
save_to_disk(data, file_name=cache_file_name)
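    # Each line of the resulting jsonl is one training window:
    #     {"input_ids": [...], "start_token": int, "end_token": int, "category": 0-4}
    # where the category index follows CATEGORY_MAPPING (null, yes, no, long, short).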
| 685 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'uclanlp/visualbert-vqa': 'https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json',
'uclanlp/visualbert-vqa-pre': 'https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json',
'uclanlp/visualbert-vqa-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-vcr': 'https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json',
'uclanlp/visualbert-vcr-pre': 'https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json',
'uclanlp/visualbert-vcr-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-nlvr2': 'https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-pre': 'https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_UpperCAmelCase ='''visual_bert'''
def __init__( self: Union[str, Any] , a: List[Any]=3_05_22 , a: List[Any]=7_68 , a: Union[str, Any]=5_12 , a: List[str]=12 , a: Tuple=12 , a: Optional[Any]=30_72 , a: int="gelu" , a: Union[str, Any]=0.1 , a: int=0.1 , a: str=5_12 , a: Optional[int]=2 , a: List[str]=0.02 , a: Optional[int]=1e-12 , a: str=False , a: Any=True , a: Tuple=1 , a: Dict=0 , a: Any=2 , **a: Optional[Any] , ) ->str:
'''simple docstring'''
super().__init__(pad_token_id=a , bos_token_id=a , eos_token_id=a , **a)
a_ = vocab_size
a_ = max_position_embeddings
a_ = hidden_size
a_ = visual_embedding_dim
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = initializer_range
a_ = type_vocab_size
a_ = layer_norm_eps
a_ = bypass_transformer
a_ = special_visual_initialize
| 685 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a_ = {
'configuration_xlm_roberta': [
'XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaConfig',
'XLMRobertaOnnxConfig',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['XLMRobertaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['XLMRobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaForCausalLM',
'XLMRobertaForMaskedLM',
'XLMRobertaForMultipleChoice',
'XLMRobertaForQuestionAnswering',
'XLMRobertaForSequenceClassification',
'XLMRobertaForTokenClassification',
'XLMRobertaModel',
'XLMRobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMRobertaForCausalLM',
'TFXLMRobertaForMaskedLM',
'TFXLMRobertaForMultipleChoice',
'TFXLMRobertaForQuestionAnswering',
'TFXLMRobertaForSequenceClassification',
'TFXLMRobertaForTokenClassification',
'TFXLMRobertaModel',
'TFXLMRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxXLMRobertaForMaskedLM',
'FlaxXLMRobertaForCausalLM',
'FlaxXLMRobertaForMultipleChoice',
'FlaxXLMRobertaForQuestionAnswering',
'FlaxXLMRobertaForSequenceClassification',
'FlaxXLMRobertaForTokenClassification',
'FlaxXLMRobertaModel',
'FlaxXLMRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 685 |
'''simple docstring'''
from heapq import heappop, heappush
import numpy as np
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,) -> tuple[float | int, list[tuple[int, int]]]:
'''simple docstring'''
a_ , a_ = grid.shape
a_ = [-1, 1, 0, 0]
a_ = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
a_ , a_ = [(0, source)], set()
a_ = np.full((rows, cols) ,np.inf )
a_ = 0
a_ = np.empty((rows, cols) ,dtype=lowercase__ )
a_ = None
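    # Standard Dijkstra loop below: pop the closest unvisited cell; when
    # `destination` is reached, walk the predecessor links back to `source`
    # to recover the path (cells equal to 1 are walkable, each step costs 1).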
while queue:
        a_ , a_ = heappop(lowercase__ )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
a_ = []
while (x, y) != source:
path.append((x, y) )
a_ , a_ = predecessors[x, y]
path.append(lowercase__ ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(lowercase__ ) ):
a_ , a_ = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
a_ = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(lowercase__ ,(dist + 1, (nx, ny)) )
a_ = dist + 1
a_ = (x, y)
return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
| 685 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( lowercase_ , unittest.TestCase ):
_UpperCAmelCase =DiTPipeline
_UpperCAmelCase =CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
_UpperCAmelCase =PipelineTesterMixin.required_optional_params - {
'''latents''',
'''num_images_per_prompt''',
'''callback''',
'''callback_steps''',
}
_UpperCAmelCase =CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
_UpperCAmelCase =False
def _lowerCAmelCase ( self: Any) ->int:
'''simple docstring'''
torch.manual_seed(0)
        a_ = Transformer2DModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=a , activation_fn="gelu-approximate" , num_embeds_ada_norm=10_00 , norm_type="ada_norm_zero" , norm_elementwise_affine=a , )
a_ = AutoencoderKL()
a_ = DDIMScheduler()
a_ = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
return components
def _lowerCAmelCase ( self: Any , a: Any , a: str=0) ->str:
'''simple docstring'''
if str(a).startswith("mps"):
a_ = torch.manual_seed(a)
else:
a_ = torch.Generator(device=a).manual_seed(a)
a_ = {
"class_labels": [1],
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def _lowerCAmelCase ( self: Dict) ->Any:
'''simple docstring'''
a_ = "cpu"
a_ = self.get_dummy_components()
a_ = self.pipeline_class(**a)
pipe.to(a)
pipe.set_progress_bar_config(disable=a)
a_ = self.get_dummy_inputs(a)
a_ = pipe(**a).images
a_ = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3))
a_ = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
a_ = np.abs(image_slice.flatten() - expected_slice).max()
self.assertLessEqual(a , 1e-3)
def _lowerCAmelCase ( self: Optional[Any]) ->Dict:
'''simple docstring'''
self._test_inference_batch_single_identical(relax_max_difference=a , expected_max_diff=1e-3)
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def _lowerCAmelCase ( self: Optional[Any]) ->Dict:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@require_torch_gpu
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def _lowerCAmelCase ( self: List[str]) ->List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCAmelCase ( self: str) ->str:
'''simple docstring'''
a_ = torch.manual_seed(0)
a_ = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
pipe.to("cuda")
a_ = ["vase", "umbrella", "white shark", "white wolf"]
a_ = pipe.get_label_ids(a)
a_ = pipe(a , generator=a , num_inference_steps=40 , output_type="np").images
for word, image in zip(a , a):
a_ = load_numpy(
f"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""")
assert np.abs((expected_image - image).max()) < 1e-2
def _lowerCAmelCase ( self: Union[str, Any]) ->Optional[Any]:
'''simple docstring'''
a_ = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
a_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.to("cuda")
a_ = ["vase", "umbrella"]
a_ = pipe.get_label_ids(a)
a_ = torch.manual_seed(0)
a_ = pipe(a , generator=a , num_inference_steps=25 , output_type="np").images
for word, image in zip(a , a):
a_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
f"""/dit/{word}_512.npy""")
assert np.abs((expected_image - image).max()) < 1e-1
| 685 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ) -> Any:
'''simple docstring'''
with open(lowercase__ ) as metadata_file:
a_ = json.load(lowercase__ )
a_ = LukeConfig(use_entity_aware_attention=lowercase__ ,**metadata["model_config"] )
# Load in the weights from the checkpoint_path
a_ = torch.load(lowercase__ ,map_location="cpu" )["module"]
# Load the entity vocab file
a_ = load_original_entity_vocab(lowercase__ )
# add an entry for [MASK2]
a_ = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
a_ = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
a_ = AddedToken("<ent>" ,lstrip=lowercase__ ,rstrip=lowercase__ )
a_ = AddedToken("<ent2>" ,lstrip=lowercase__ ,rstrip=lowercase__ )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(lowercase__ )
with open(os.path.join(lowercase__ ,"tokenizer_config.json" ) ,"r" ) as f:
a_ = json.load(lowercase__ )
a_ = "MLukeTokenizer"
with open(os.path.join(lowercase__ ,"tokenizer_config.json" ) ,"w" ) as f:
json.dump(lowercase__ ,lowercase__ )
with open(os.path.join(lowercase__ ,MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) ,"w" ) as f:
json.dump(lowercase__ ,lowercase__ )
a_ = MLukeTokenizer.from_pretrained(lowercase__ )
# Initialize the embeddings of the special tokens
a_ = tokenizer.convert_tokens_to_ids(["@"] )[0]
a_ = tokenizer.convert_tokens_to_ids(["#"] )[0]
a_ = state_dict["embeddings.word_embeddings.weight"]
a_ = word_emb[ent_init_index].unsqueeze(0 )
a_ = word_emb[enta_init_index].unsqueeze(0 )
a_ = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
a_ = state_dict[bias_name]
a_ = decoder_bias[ent_init_index].unsqueeze(0 )
a_ = decoder_bias[enta_init_index].unsqueeze(0 )
a_ = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
a_ = F"""encoder.layer.{layer_index}.attention.self."""
a_ = state_dict[prefix + matrix_name]
a_ = state_dict[prefix + matrix_name]
a_ = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
a_ = state_dict["entity_embeddings.entity_embeddings.weight"]
a_ = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
a_ = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
a_ = state_dict["entity_predictions.bias"]
a_ = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
a_ = torch.cat([entity_prediction_bias, entity_mask_bias] )
a_ = LukeForMaskedLM(config=lowercase__ ).eval()
state_dict.pop("entity_predictions.decoder.weight" )
state_dict.pop("lm_head.decoder.weight" )
state_dict.pop("lm_head.decoder.bias" )
a_ = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )):
a_ = state_dict[key]
else:
a_ = state_dict[key]
a_ , a_ = model.load_state_dict(lowercase__ ,strict=lowercase__ )
if set(lowercase__ ) != {"luke.embeddings.position_ids"}:
raise ValueError(F"""Unexpected unexpected_keys: {unexpected_keys}""" )
if set(lowercase__ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F"""Unexpected missing_keys: {missing_keys}""" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
a_ = MLukeTokenizer.from_pretrained(lowercase__ ,task="entity_classification" )
a_ = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
a_ = (0, 9)
a_ = tokenizer(lowercase__ ,entity_spans=[span] ,return_tensors="pt" )
a_ = model(**lowercase__ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
a_ = torch.Size((1, 33, 768) )
a_ = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] ,lowercase__ ,atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
a_ = torch.Size((1, 1, 768) )
a_ = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
F""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] ,lowercase__ ,atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
a_ = MLukeTokenizer.from_pretrained(lowercase__ )
a_ = "Tokyo is the capital of <mask>."
a_ = (24, 30)
a_ = tokenizer(lowercase__ ,entity_spans=[span] ,return_tensors="pt" )
a_ = model(**lowercase__ )
a_ = encoding["input_ids"][0].tolist()
a_ = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) )
a_ = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(lowercase__ )
a_ = outputs.entity_logits[0][0].argmax().item()
a_ = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(lowercase__ ) )
model.save_pretrained(lowercase__ )
def __UpperCAmelCase (lowercase__ ) -> Any:
'''simple docstring'''
a_ = ["[MASK]", "[PAD]", "[UNK]"]
a_ = [json.loads(lowercase__ ) for line in open(lowercase__ )]
a_ = {}
for entry in data:
a_ = entry["id"]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
a_ = entity_id
break
a_ = F"""{language}:{entity_name}"""
a_ = entity_id
return new_mapping
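# Illustrative (hypothetical) line of the original entity-vocab jsonl:
#     {"id": 7, "entities": [["Japan", "en"], ["日本", "ja"]]}
# load_original_entity_vocab turns it into {"en:Japan": 7, "ja:日本": 7}; entries whose
# name is a special token ("[MASK]", "[PAD]", "[UNK]") keep the bare name as the key.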
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
a_ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 685 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( lowercase_ , unittest.TestCase ):
_UpperCAmelCase =CLIPTokenizer
_UpperCAmelCase =CLIPTokenizerFast
_UpperCAmelCase =True
_UpperCAmelCase ={}
_UpperCAmelCase =False
def _lowerCAmelCase ( self: Any) ->Optional[int]:
'''simple docstring'''
super().setUp()
# fmt: off
a_ = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
a_ = dict(zip(a , range(len(a))))
a_ = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
a_ = {"unk_token": "<unk>"}
a_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
a_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as fp:
fp.write(json.dumps(a) + "\n")
with open(self.merges_file , "w" , encoding="utf-8") as fp:
fp.write("\n".join(a))
def _lowerCAmelCase ( self: Dict , **a: Optional[int]) ->str:
'''simple docstring'''
kwargs.update(self.special_tokens_map)
return CLIPTokenizer.from_pretrained(self.tmpdirname , **a)
def _lowerCAmelCase ( self: int , **a: Optional[int]) ->Any:
'''simple docstring'''
kwargs.update(self.special_tokens_map)
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **a)
def _lowerCAmelCase ( self: Dict , a: List[Any]) ->Tuple:
'''simple docstring'''
a_ = "lower newer"
a_ = "lower newer"
return input_text, output_text
def _lowerCAmelCase ( self: Tuple) ->int:
'''simple docstring'''
a_ = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map)
a_ = "lower newer"
a_ = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
a_ = tokenizer.tokenize(a)
self.assertListEqual(a , a)
a_ = tokens + [tokenizer.unk_token]
a_ = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a) , a)
@require_ftfy
def _lowerCAmelCase ( self: List[str]) ->str:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
a_ = self.tokenizer_class.from_pretrained(a , **a)
a_ = self.rust_tokenizer_class.from_pretrained(a , **a)
a_ = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
a_ = tokenizer_s.tokenize(a)
a_ = tokenizer_r.tokenize(a)
self.assertListEqual(a , a)
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
a_ = "xa\u0303y" + " " + "x\xe3y"
a_ = tokenizer_s.tokenize(a)
a_ = tokenizer_r.tokenize(a)
self.assertListEqual(a , a)
# Test that the tokenization is identical on unicode of space type
a_ = [
"\u0009", # (horizontal tab, '\t')
"\u000B", # (vertical tab)
"\u000C", # (form feed)
"\u0020", # (space, ' ')
"\u200E", # (left-to-right mark):w
"\u200F", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
a_ = tokenizer_s.tokenize(a)
a_ = tokenizer_r.tokenize(a)
self.assertListEqual(a , a)
# Test that the tokenization is identical on unicode of line break type
a_ = [
"\u000A", # (line feed, '\n')
"\r\n", # (carriage return and line feed, '\r\n')
"\u000D", # (carriage return, '\r')
"\r", # (carriage return, '\r')
"\u000D", # (carriage return, '\r')
"\u2028", # (line separator)
"\u2029", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
a_ = tokenizer_s.tokenize(a)
a_ = tokenizer_r.tokenize(a)
self.assertListEqual(a , a)
def _lowerCAmelCase ( self: int) ->List[str]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
a_ = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
a_ = f"""{text_of_1_token} {text_of_1_token}"""
a_ = self.rust_tokenizer_class.from_pretrained(
a , use_fast=a , )
a_ = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a)
self.assertEqual(encoding.offset_mapping[0] , (0, len(a)))
self.assertEqual(
encoding.offset_mapping[1] , (len(a) + 1, len(a) + 1 + len(a)) , )
a_ = f""" {text}"""
a_ = self.rust_tokenizer_class.from_pretrained(
a , use_fast=a , )
a_ = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a)
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(a)))
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(a) + 1, 1 + len(a) + 1 + len(a)) , )
def _lowerCAmelCase ( self: Optional[Any]) ->Dict:
'''simple docstring'''
with self.assertRaises(a) as context:
self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")
self.assertTrue(
context.exception.args[0].startswith(
"The `backend_tokenizer` provided does not match the expected format."))
@require_ftfy
def _lowerCAmelCase ( self: Optional[Any]) ->List[str]:
'''simple docstring'''
super().test_tokenization_python_rust_equals()
def _lowerCAmelCase ( self: Union[str, Any]) ->List[str]:
'''simple docstring'''
pass
| 685 |
'''simple docstring'''
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( lowercase_ , unittest.TestCase ):
_UpperCAmelCase =LxmertTokenizer
_UpperCAmelCase =LxmertTokenizerFast
_UpperCAmelCase =True
_UpperCAmelCase =True
def _lowerCAmelCase ( self: Dict) ->int:
'''simple docstring'''
super().setUp()
a_ = [
"[UNK]",
"[CLS]",
"[SEP]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
a_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
def _lowerCAmelCase ( self: Optional[Any] , a: Dict) ->Optional[Any]:
'''simple docstring'''
a_ = "UNwant\u00E9d,running"
a_ = "unwanted, running"
return input_text, output_text
def _lowerCAmelCase ( self: List[Any]) ->Optional[int]:
'''simple docstring'''
a_ = self.tokenizer_class(self.vocab_file)
a_ = tokenizer.tokenize("UNwant\u00E9d,running")
self.assertListEqual(a , ["un", "##want", "##ed", ",", "runn", "##ing"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(a) , [7, 4, 5, 10, 8, 9])
def _lowerCAmelCase ( self: List[Any]) ->Any:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
a_ = self.get_tokenizer()
a_ = self.get_rust_tokenizer()
a_ = "I was born in 92000, and this is falsé."
a_ = tokenizer.tokenize(a)
a_ = rust_tokenizer.tokenize(a)
self.assertListEqual(a , a)
a_ = tokenizer.encode(a , add_special_tokens=a)
a_ = rust_tokenizer.encode(a , add_special_tokens=a)
self.assertListEqual(a , a)
a_ = self.get_rust_tokenizer()
a_ = tokenizer.encode(a)
a_ = rust_tokenizer.encode(a)
self.assertListEqual(a , a)
| 685 | 1 |
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
a_ = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
a_ = '\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but this example also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n'
a_ = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
def _lowerCAmelCase ( self: Optional[Any]) ->List[str]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("float"),
"references": datasets.Value("float"),
}) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"] , )
def _lowerCAmelCase ( self: Optional[int] , a: List[str] , a: List[Any] , a: Optional[Any]=False) ->List[Any]:
'''simple docstring'''
if return_pvalue:
a_ = pearsonr(a , a)
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(a , a)[0])}
| 685 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
a_ = {
'configuration_cpmant': ['CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CpmAntConfig'],
'tokenization_cpmant': ['CpmAntTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST',
'CpmAntForCausalLM',
'CpmAntModel',
'CpmAntPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 685 | 1 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def _lowerCAmelCase ( self: Optional[int]) ->Tuple:
'''simple docstring'''
a_ = tempfile.mkdtemp()
a_ = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"的",
"价",
"格",
"是",
"15",
"便",
"alex",
"##andra",
",",
"。",
"-",
"t",
"shirt",
]
a_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
a_ = {
"do_resize": True,
"size": {"height": 2_24, "width": 2_24},
"do_center_crop": True,
"crop_size": {"height": 18, "width": 18},
"do_normalize": True,
"image_mean": [0.4814_5466, 0.457_8275, 0.4082_1073],
"image_std": [0.2686_2954, 0.2613_0258, 0.2757_7711],
"do_convert_rgb": True,
}
a_ = os.path.join(self.tmpdirname , a)
with open(self.image_processor_file , "w" , encoding="utf-8") as fp:
json.dump(a , a)
def _lowerCAmelCase ( self: Optional[int] , **a: List[str]) ->Dict:
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname , **a)
def _lowerCAmelCase ( self: List[Any] , **a: List[str]) ->List[Any]:
'''simple docstring'''
return BertTokenizerFast.from_pretrained(self.tmpdirname , **a)
def _lowerCAmelCase ( self: Any , **a: Tuple) ->Dict:
'''simple docstring'''
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **a)
def _lowerCAmelCase ( self: Dict) ->Optional[int]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def _lowerCAmelCase ( self: Any) ->Optional[int]:
'''simple docstring'''
        a_ = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uint8)]
a_ = [Image.fromarray(np.moveaxis(a , 0 , -1)) for x in image_inputs]
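        # A single random 3x30x400 uint8 array (CHW), moved to channels-last and
        # wrapped as a PIL image.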
return image_inputs
def _lowerCAmelCase ( self: Any) ->Union[str, Any]:
'''simple docstring'''
a_ = self.get_tokenizer()
a_ = self.get_rust_tokenizer()
a_ = self.get_image_processor()
a_ = ChineseCLIPProcessor(tokenizer=a , image_processor=a)
processor_slow.save_pretrained(self.tmpdirname)
a_ = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=a)
a_ = ChineseCLIPProcessor(tokenizer=a , image_processor=a)
processor_fast.save_pretrained(self.tmpdirname)
a_ = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer , a)
self.assertIsInstance(processor_fast.tokenizer , a)
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor , a)
self.assertIsInstance(processor_fast.image_processor , a)
def _lowerCAmelCase ( self: List[Any]) ->Dict:
'''simple docstring'''
a_ = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
a_ = self.get_tokenizer(cls_token="(CLS)" , sep_token="(SEP)")
a_ = self.get_image_processor(do_normalize=a)
a_ = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token="(CLS)" , sep_token="(SEP)" , do_normalize=a)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , a)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , a)
def _lowerCAmelCase ( self: str) ->Union[str, Any]:
'''simple docstring'''
a_ = self.get_image_processor()
a_ = self.get_tokenizer()
a_ = ChineseCLIPProcessor(tokenizer=a , image_processor=a)
a_ = self.prepare_image_inputs()
a_ = image_processor(a , return_tensors="np")
a_ = processor(images=a , return_tensors="np")
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2)
def _lowerCAmelCase ( self: Tuple) ->Tuple:
'''simple docstring'''
a_ = self.get_image_processor()
a_ = self.get_tokenizer()
a_ = ChineseCLIPProcessor(tokenizer=a , image_processor=a)
a_ = "Alexandra,T-shirt的价格是15便士。"
a_ = processor(text=a)
a_ = tokenizer(a)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def _lowerCAmelCase ( self: Any) ->Optional[int]:
'''simple docstring'''
a_ = self.get_image_processor()
a_ = self.get_tokenizer()
a_ = ChineseCLIPProcessor(tokenizer=a , image_processor=a)
a_ = "Alexandra,T-shirt的价格是15便士。"
a_ = self.prepare_image_inputs()
a_ = processor(text=a , images=a)
self.assertListEqual(list(inputs.keys()) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])
# test if it raises when no input is passed
with pytest.raises(a):
processor()
def _lowerCAmelCase ( self: Dict) ->Optional[Any]:
'''simple docstring'''
a_ = self.get_image_processor()
a_ = self.get_tokenizer()
a_ = ChineseCLIPProcessor(tokenizer=a , image_processor=a)
a_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
a_ = processor.batch_decode(a)
a_ = tokenizer.batch_decode(a)
self.assertListEqual(a , a)
def _lowerCAmelCase ( self: Optional[Any]) ->Optional[Any]:
'''simple docstring'''
a_ = self.get_image_processor()
a_ = self.get_tokenizer()
a_ = ChineseCLIPProcessor(tokenizer=a , image_processor=a)
a_ = "Alexandra,T-shirt的价格是15便士。"
a_ = self.prepare_image_inputs()
a_ = processor(text=a , images=a)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
| 685 |
'''simple docstring'''
import re
def __UpperCAmelCase (lowercase__ ) -> bool:
'''simple docstring'''
a_ = re.compile(
r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$" )
return bool(re.search(lowercase__ ,lowercase__ ) )
if __name__ == "__main__":
a_ = '0094702343221'
print(is_sri_lankan_phone_number(phone))
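    # A few more illustrative checks against the pattern above:
    #     is_sri_lankan_phone_number("+94773283048")  -> True
    #     is_sri_lankan_phone_number("94721234567")   -> True
    #     is_sri_lankan_phone_number("0912343221")    -> False (second digit must be 7)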
| 685 | 1 |
'''simple docstring'''
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
a_ = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
a_ = subprocess.check_output(F'git diff --name-only {fork_point_sha}'.split()).decode('utf-8').split()
a_ = '|'.join(sys.argv[1:])
a_ = re.compile(rF'^({joined_dirs}).*?\.py$')
a_ = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
| 685 |
'''simple docstring'''
import argparse
import os
import re
a_ = 'src/transformers/models/auto'
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
a_ = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings
a_ = re.compile(r'\s*\(\s*"(\S[^"]+)"')
def __UpperCAmelCase (lowercase__ ,lowercase__ = False ) -> List[Any]:
'''simple docstring'''
with open(lowercase__ ,"r" ,encoding="utf-8" ) as f:
a_ = f.read()
a_ = content.split("\n" )
a_ = []
a_ = 0
while line_idx < len(lowercase__ ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
a_ = len(re.search(r"^(\s*)\S" ,lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(" " * indent + "(" ):
new_lines.append(lines[line_idx] )
line_idx += 1
a_ = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
a_ = line_idx
while not lines[line_idx].startswith(" " * indent + ")" ):
line_idx += 1
blocks.append("\n".join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
a_ = sorted(lowercase__ ,key=lambda lowercase__ : _re_identifier.search(lowercase__ ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(lowercase__ ,"w" ,encoding="utf-8" ) as f:
f.write("\n".join(lowercase__ ) )
elif "\n".join(lowercase__ ) != content:
return True
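# Example of the sorting performed above: inside a mapping such as
#     MODEL_MAPPING_NAMES = OrderedDict(
#         [
#             ("albert", "AlbertModel"),
#             ("bert", "BertModel"),
#         ]
#     )
# each entry block is reordered by its quoted identifier ("albert" < "bert" < ...).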
def __UpperCAmelCase (lowercase__ = False ) -> Optional[int]:
'''simple docstring'''
a_ = [os.path.join(lowercase__ ,lowercase__ ) for f in os.listdir(lowercase__ ) if f.endswith(".py" )]
a_ = [sort_auto_mapping(lowercase__ ,overwrite=lowercase__ ) for fname in fnames]
if not overwrite and any(lowercase__ ):
a_ = [f for f, d in zip(lowercase__ ,lowercase__ ) if d]
raise ValueError(
F"""The following files have auto mappings that need sorting: {', '.join(lowercase__ )}. Run `make style` to fix"""
" this." )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
a_ = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
| 685 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'facebook/levit-128S': 'https://huggingface.co/facebook/levit-128S/resolve/main/config.json',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_UpperCAmelCase ='''levit'''
def __init__( self: List[str] , a: int=2_24 , a: Optional[int]=3 , a: str=3 , a: List[Any]=2 , a: Optional[int]=1 , a: Any=16 , a: Optional[int]=[1_28, 2_56, 3_84] , a: Tuple=[4, 8, 12] , a: Dict=[4, 4, 4] , a: str=[16, 16, 16] , a: Dict=0 , a: Optional[int]=[2, 2, 2] , a: Union[str, Any]=[2, 2, 2] , a: Union[str, Any]=0.02 , **a: List[Any] , ) ->Any:
'''simple docstring'''
super().__init__(**a)
a_ = image_size
a_ = num_channels
a_ = kernel_size
a_ = stride
a_ = padding
a_ = hidden_sizes
a_ = num_attention_heads
a_ = depths
a_ = key_dim
a_ = drop_path_rate
a_ = patch_size
a_ = attention_ratio
a_ = mlp_ratio
a_ = initializer_range
a_ = [
["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
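        # Two "Subsample" (downsampling attention) stages sit between the three
        # hidden sizes above; in LeViT each reduces the spatial resolution (the
        # trailing 2 in each entry is presumably the subsample stride).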
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_UpperCAmelCase =version.parse('''1.11''' )
@property
def _lowerCAmelCase ( self: Tuple) ->Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
])
@property
def _lowerCAmelCase ( self: List[str]) ->float:
'''simple docstring'''
return 1e-4
| 685 |
'''simple docstring'''
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class SCREAMING_SNAKE_CASE__ ( lowercase_ , lowercase_ ):
_UpperCAmelCase ='''pixel_values'''
_UpperCAmelCase =False
_UpperCAmelCase =TimmBackboneConfig
def __init__( self: Union[str, Any] , a: Union[str, Any] , **a: Tuple) ->Optional[Any]:
'''simple docstring'''
requires_backends(self , "timm")
super().__init__(a)
a_ = config
if config.backbone is None:
raise ValueError("backbone is not set in the config. Please set it to a timm model name.")
if config.backbone not in timm.list_models():
raise ValueError(f"""backbone {config.backbone} is not supported by timm.""")
if hasattr(a , "out_features") and config.out_features is not None:
raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")
a_ = getattr(a , "use_pretrained_backbone" , a)
if pretrained is None:
raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")
# We just take the final layer by default. This matches the default for the transformers models.
a_ = config.out_indices if getattr(a , "out_indices" , a) is not None else (-1,)
a_ = timm.create_model(
config.backbone , pretrained=a , features_only=config.features_only , in_chans=config.num_channels , out_indices=a , **a , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
a_ = self._backbone.return_layers
a_ = {layer["module"]: str(a) for i, layer in enumerate(self._backbone.feature_info.info)}
super()._init_backbone(a)
@classmethod
def _lowerCAmelCase ( cls: Tuple , a: Optional[Any] , *a: Optional[Any] , **a: str) ->List[Any]:
'''simple docstring'''
requires_backends(cls , ["vision", "timm"])
from ...models.timm_backbone import TimmBackboneConfig
a_ = kwargs.pop("config" , TimmBackboneConfig())
a_ = kwargs.pop("use_timm_backbone" , a)
if not use_timm:
raise ValueError("use_timm_backbone must be True for timm backbones")
a_ = kwargs.pop("num_channels" , config.num_channels)
a_ = kwargs.pop("features_only" , config.features_only)
a_ = kwargs.pop("use_pretrained_backbone" , config.use_pretrained_backbone)
a_ = kwargs.pop("out_indices" , config.out_indices)
a_ = TimmBackboneConfig(
backbone=a , num_channels=a , features_only=a , use_pretrained_backbone=a , out_indices=a , )
return super()._from_config(a , **a)
def _lowerCAmelCase ( self: Optional[Any] , a: Optional[int]) ->str:
'''simple docstring'''
pass
def _lowerCAmelCase ( self: Tuple , a: List[Any] , a: Any=None , a: Dict=None , a: Optional[int]=None , **a: int) ->Union[BackboneOutput, Tuple[Tensor, ...]]:
'''simple docstring'''
a_ = return_dict if return_dict is not None else self.config.use_return_dict
a_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
a_ = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError("Cannot output attentions for timm backbones at the moment")
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
a_ = self._all_layers
a_ = self._backbone(a , **a)
a_ = self._return_layers
a_ = tuple(hidden_states[i] for i in self.out_indices)
else:
a_ = self._backbone(a , **a)
a_ = None
a_ = tuple(a)
a_ = tuple(a) if hidden_states is not None else None
if not return_dict:
a_ = (feature_maps,)
if output_hidden_states:
a_ = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=a , hidden_states=a , attentions=a)
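# Illustrative usage sketch (assumes `timm` is installed; "TimmBackbone" refers to
# the backbone class defined above):
#     config = TimmBackboneConfig(backbone="resnet18", use_pretrained_backbone=False)
#     backbone = TimmBackbone(config)
#     feature_maps = backbone(pixel_values).feature_maps   # tuple, one map per out_index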
| 685 | 1 |
'''simple docstring'''
a_ = 8.314_462 # Unit - J mol-1 K-1
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ) -> float:
'''simple docstring'''
    if moles < 0 or kelvin < 0 or volume <= 0:
raise ValueError("Invalid inputs. Enter positive value." )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ) -> float:
'''simple docstring'''
    if moles < 0 or kelvin < 0 or pressure <= 0:
raise ValueError("Invalid inputs. Enter positive value." )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
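    # Worked example (illustrative): 1 mol at 273.15 K in 0.0224 m^3 gives
    # pressure = 1 * 273.15 * 8.314462 / 0.0224 ≈ 1.014e5 Pa, i.e. about 1 atm.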
| 685 |
'''simple docstring'''
class OverFlowError(Exception):
    pass
class UnderFlowError(Exception):
    pass
class SCREAMING_SNAKE_CASE__ :
def __init__( self: Optional[Any]) ->List[str]:
'''simple docstring'''
a_ = [
[],
[],
[],
]
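        # Three FIFO sub-queues: index 0 is the highest priority and dequeue
        # scans them in order 0 -> 2.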
def _lowerCAmelCase ( self: Dict , a: int , a: int) ->None:
'''simple docstring'''
try:
if len(self.queues[priority]) >= 1_00:
                raise OverFlowError("Maximum queue size is 100")
self.queues[priority].append(a)
except IndexError:
raise ValueError("Valid priorities are 0, 1, and 2")
def _lowerCAmelCase ( self: Union[str, Any]) ->int:
'''simple docstring'''
for queue in self.queues:
if queue:
return queue.pop(0)
raise UnderFlowError("All queues are empty")
def __str__( self: Dict) ->str:
'''simple docstring'''
return "\n".join(f"""Priority {i}: {q}""" for i, q in enumerate(self.queues))
class SCREAMING_SNAKE_CASE__ :
def __init__( self: Any) ->List[str]:
'''simple docstring'''
a_ = []
def _lowerCAmelCase ( self: int , a: int) ->None:
'''simple docstring'''
        if len(self.queue) >= 1_00:
raise OverFlowError("Maximum queue size is 100")
self.queue.append(a)
def _lowerCAmelCase ( self: List[str]) ->int:
'''simple docstring'''
if not self.queue:
raise UnderFlowError("The queue is empty")
else:
a_ = min(self.queue)
self.queue.remove(a)
return data
def __str__( self: Optional[int]) ->str:
'''simple docstring'''
return str(self.queue)
def __UpperCAmelCase () -> Union[str, Any]:
'''simple docstring'''
a_ = FixedPriorityQueue()
fpq.enqueue(0 ,10 )
fpq.enqueue(1 ,70 )
fpq.enqueue(0 ,100 )
fpq.enqueue(2 ,1 )
fpq.enqueue(2 ,5 )
fpq.enqueue(1 ,7 )
fpq.enqueue(2 ,4 )
fpq.enqueue(1 ,64 )
fpq.enqueue(0 ,128 )
print(lowercase__ )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(lowercase__ )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def __UpperCAmelCase () -> List[Any]:
'''simple docstring'''
a_ = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(100 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(128 )
print(lowercase__ )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(lowercase__ )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 685 | 1 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE__ :
def __init__( self: Tuple , a: Union[str, Any] , a: str=13 , a: Dict=7 , a: Any=True , a: List[Any]=True , a: Optional[Any]=True , a: List[str]=True , a: List[Any]=99 , a: Tuple=32 , a: Optional[int]=5 , a: Union[str, Any]=4 , a: Dict=37 , a: Optional[int]="gelu" , a: Union[str, Any]=0.1 , a: List[Any]=0.1 , a: str=5_12 , a: Dict=16 , a: Tuple=2 , a: Union[str, Any]=0.02 , a: Union[str, Any]=3 , a: Tuple=4 , a: Dict=None , ) ->Tuple:
'''simple docstring'''
a_ = parent
a_ = batch_size
a_ = seq_length
a_ = is_training
a_ = use_input_mask
a_ = use_token_type_ids
a_ = use_labels
a_ = vocab_size
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = type_vocab_size
a_ = type_sequence_label_size
a_ = initializer_range
a_ = num_labels
a_ = num_choices
a_ = scope
def _lowerCAmelCase ( self: Optional[Any]) ->List[Any]:
'''simple docstring'''
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ = None
if self.use_input_mask:
a_ = random_attention_mask([self.batch_size, self.seq_length])
a_ = None
if self.use_token_type_ids:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
a_ = None
a_ = None
a_ = None
if self.use_labels:
a_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a_ = ids_tensor([self.batch_size] , self.num_choices)
a_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCAmelCase ( self: Any) ->Tuple:
'''simple docstring'''
return NystromformerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , )
def _lowerCAmelCase ( self: Tuple , a: Optional[Any] , a: str , a: str , a: Dict , a: Union[str, Any] , a: int , a: Optional[int]) ->List[str]:
'''simple docstring'''
a_ = NystromformerModel(config=a)
model.to(a)
model.eval()
a_ = model(a , attention_mask=a , token_type_ids=a)
a_ = model(a , token_type_ids=a)
a_ = model(a)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _lowerCAmelCase ( self: Dict , a: Tuple , a: Optional[Any] , a: Union[str, Any] , a: Dict , a: int , a: int , a: int) ->str:
'''simple docstring'''
a_ = NystromformerForMaskedLM(config=a)
model.to(a)
model.eval()
a_ = model(a , attention_mask=a , token_type_ids=a , labels=a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _lowerCAmelCase ( self: List[Any] , a: Any , a: Optional[Any] , a: Optional[Any] , a: str , a: Optional[int] , a: Tuple , a: Optional[Any]) ->Dict:
'''simple docstring'''
a_ = NystromformerForQuestionAnswering(config=a)
model.to(a)
model.eval()
a_ = model(
a , attention_mask=a , token_type_ids=a , start_positions=a , end_positions=a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def _lowerCAmelCase ( self: str , a: int , a: Optional[int] , a: List[str] , a: Any , a: Dict , a: Union[str, Any] , a: Union[str, Any]) ->Union[str, Any]:
'''simple docstring'''
a_ = self.num_labels
a_ = NystromformerForSequenceClassification(a)
model.to(a)
model.eval()
a_ = model(a , attention_mask=a , token_type_ids=a , labels=a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def _lowerCAmelCase ( self: int , a: str , a: Optional[Any] , a: List[str] , a: str , a: List[str] , a: Any , a: Tuple) ->Optional[Any]:
'''simple docstring'''
a_ = self.num_labels
a_ = NystromformerForTokenClassification(config=a)
model.to(a)
model.eval()
a_ = model(a , attention_mask=a , token_type_ids=a , labels=a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def _lowerCAmelCase ( self: Tuple , a: Union[str, Any] , a: Optional[Any] , a: Optional[Any] , a: int , a: str , a: Union[str, Any] , a: int) ->List[str]:
'''simple docstring'''
a_ = self.num_choices
a_ = NystromformerForMultipleChoice(config=a)
model.to(a)
model.eval()
a_ = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ = model(
a , attention_mask=a , token_type_ids=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def _lowerCAmelCase ( self: str) ->int:
'''simple docstring'''
a_ = self.prepare_config_and_inputs()
        ((a_) , (a_) , (a_) , (a_) , (a_) , (a_) , (a_)) = config_and_inputs
a_ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( lowercase_ , lowercase_ , unittest.TestCase ):
_UpperCAmelCase =(
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
_UpperCAmelCase =(
{
'''feature-extraction''': NystromformerModel,
'''fill-mask''': NystromformerForMaskedLM,
'''question-answering''': NystromformerForQuestionAnswering,
'''text-classification''': NystromformerForSequenceClassification,
'''token-classification''': NystromformerForTokenClassification,
'''zero-shot''': NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCAmelCase =False
_UpperCAmelCase =False
def _lowerCAmelCase ( self: Tuple) ->Any:
'''simple docstring'''
a_ = NystromformerModelTester(self)
a_ = ConfigTester(self , config_class=a , hidden_size=37)
def _lowerCAmelCase ( self: Dict) ->List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self: Any) ->Dict:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
def _lowerCAmelCase ( self: List[str]) ->Dict:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
def _lowerCAmelCase ( self: Dict) ->List[str]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
def _lowerCAmelCase ( self: int) ->Tuple:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
def _lowerCAmelCase ( self: int) ->Optional[Any]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
def _lowerCAmelCase ( self: str) ->List[str]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
def _lowerCAmelCase ( self: Union[str, Any]) ->Dict:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow
def _lowerCAmelCase ( self: Any) ->Optional[int]:
'''simple docstring'''
for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            a_ = NystromformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
def _lowerCAmelCase ( self: Dict) ->Optional[Any]:
'''simple docstring'''
a_ = NystromformerModel.from_pretrained("uw-madison/nystromformer-512")
a_ = torch.tensor([[0, 1, 2, 3, 4, 5]])
with torch.no_grad():
            a_ = model(input_ids)[0]
a_ = torch.Size((1, 6, 7_68))
        self.assertEqual(output.shape , expected_shape)
        a_ = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]])
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4))
@slow
def _lowerCAmelCase ( self: Optional[Any]) ->int:
'''simple docstring'''
a_ = "the [MASK] of Belgium is Brussels"
a_ = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
a_ = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")
a_ = tokenizer(a , return_tensors="pt")
with torch.no_grad():
a_ = model(encoding.input_ids).logits
a_ = token_logits[:, 2, :].argmax(-1)[0]
        self.assertEqual(tokenizer.decode(predicted_token_id) , "capital")
| 685 |
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def __UpperCAmelCase () -> Optional[Any]:
'''simple docstring'''
a_ = {
"repo_name": ["test_repo1", "test_repo2", "test_repo3"],
"path": ["test_1.py", "test_2.py", "unit_test.py"],
"content": ["a " * 20, "a " * 30, "b " * 7],
}
a_ = Dataset.from_dict(lowercase__ )
return dataset
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
def _lowerCAmelCase ( self: Union[str, Any]) ->Optional[int]:
'''simple docstring'''
a_ = get_dataset()
        a_ = make_duplicate_clusters(ds , 0.85)
self.assertEqual(len(duplicate_clusters[0]) , 2)
def _lowerCAmelCase ( self: Any) ->Dict:
'''simple docstring'''
a_ = get_dataset()
        a_ , a_ = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter) , 2)
        print(ds_filter)
self.assertEqual(duplicate_clusters[0][0]["copies"] , 2)
self.assertEqual(duplicate_clusters[0][0]["is_extreme"] , a)
| 685 | 1 |
'''simple docstring'''
def __UpperCAmelCase (lowercase__ ,lowercase__ ) -> Any:
'''simple docstring'''
if b == 0:
return 1
if (b % 2) == 0:
return actual_power(lowercase__ ,int(b / 2 ) ) * actual_power(lowercase__ ,int(b / 2 ) )
else:
return a * actual_power(lowercase__ ,int(b / 2 ) ) * actual_power(lowercase__ ,int(b / 2 ) )
def __UpperCAmelCase (lowercase__ ,lowercase__ ) -> float:
'''simple docstring'''
if b < 0:
return 1 / actual_power(lowercase__ ,lowercase__ )
return actual_power(lowercase__ ,lowercase__ )
if __name__ == "__main__":
print(power(-2, -3))
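# The recursion above evaluates its half-power twice per call, so it performs
# O(b) multiplications overall. A minimal sketch of the usual fix, squaring one
# recursive result instead of recomputing it (fast_power is a name introduced
# here for illustration, not part of the original module):
def fast_power(base: float, exp: int) -> float:
    if exp < 0:
        return 1 / fast_power(base, -exp)
    if exp == 0:
        return 1
    half = fast_power(base, exp // 2)  # compute the half-power exactly once
    return half * half if exp % 2 == 0 else base * half * half

assert fast_power(-2, -3) == -0.125  # same value the print above produces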
| 685 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
a_ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
def __init__( self: List[Any] , *a: str , **a: Tuple) ->None:
'''simple docstring'''
warnings.warn(
"The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use DonutImageProcessor instead." , a , )
super().__init__(*a , **a)
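# Migration sketch for the deprecation warning above (assuming the public
# "naver-clova-ix/donut-base" checkpoint; both classes accept the same
# constructor arguments, so no other changes are needed):
#
#     from transformers import DonutImageProcessor
#     image_processor = DonutImageProcessor.from_pretrained("naver-clova-ix/donut-base")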
| 685 | 1 |
'''simple docstring'''
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class SCREAMING_SNAKE_CASE__ :
def __init__( self: Tuple , a: int , ) ->List[str]:
'''simple docstring'''
a_ = parent
a_ = 13
a_ = 7
a_ = 30
a_ = self.seq_length + self.mem_len
a_ = 15
a_ = True
a_ = True
a_ = 99
a_ = [10, 50, 80]
a_ = 32
a_ = 32
a_ = 4
a_ = 8
a_ = 1_28
a_ = 2
a_ = 2
a_ = None
a_ = 1
a_ = 0
a_ = 3
a_ = self.vocab_size - 1
a_ = 0.01
def _lowerCAmelCase ( self: Optional[int]) ->int:
'''simple docstring'''
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ = None
if self.use_labels:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def _lowerCAmelCase ( self: List[Any]) ->Dict:
'''simple docstring'''
random.seed(self.seed)
tf.random.set_seed(self.seed)
def _lowerCAmelCase ( self: Optional[Any] , a: str , a: Tuple , a: Union[str, Any] , a: Union[str, Any]) ->str:
'''simple docstring'''
a_ = TFTransfoXLModel(a)
a_ , a_ = model(a).to_tuple()
a_ = {"input_ids": input_ids_a, "mems": mems_a}
a_ , a_ = model(a).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def _lowerCAmelCase ( self: Tuple , a: str , a: Optional[Any] , a: str , a: Optional[int]) ->List[Any]:
'''simple docstring'''
a_ = TFTransfoXLLMHeadModel(a)
a_ , a_ = model(a).to_tuple()
a_ = {"input_ids": input_ids_a, "labels": lm_labels}
a_ , a_ = model(a).to_tuple()
a_ , a_ = model([input_ids_a, mems_a]).to_tuple()
a_ = {"input_ids": input_ids_a, "mems": mems_a, "labels": lm_labels}
a_ , a_ = model(a).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def _lowerCAmelCase ( self: str , a: int , a: Union[str, Any] , a: Union[str, Any] , a: List[Any]) ->Tuple:
'''simple docstring'''
a_ = TFTransfoXLForSequenceClassification(a)
a_ = model(a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def _lowerCAmelCase ( self: List[Any]) ->str:
'''simple docstring'''
a_ = self.prepare_config_and_inputs()
((a_) , (a_) , (a_) , (a_)) = config_and_inputs
a_ = {"input_ids": input_ids_a}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE__ ( lowercase_ , lowercase_ , unittest.TestCase ):
_UpperCAmelCase =(
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
_UpperCAmelCase =() if is_tf_available() else ()
_UpperCAmelCase =(
{
'''feature-extraction''': TFTransfoXLModel,
'''text-classification''': TFTransfoXLForSequenceClassification,
'''text-generation''': TFTransfoXLLMHeadModel,
'''zero-shot''': TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
_UpperCAmelCase =False
_UpperCAmelCase =False
_UpperCAmelCase =False
_UpperCAmelCase =False
def _lowerCAmelCase ( self: List[Any] , a: List[str] , a: Dict , a: List[Any] , a: int , a: List[str]) ->int:
'''simple docstring'''
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def _lowerCAmelCase ( self: List[Any]) ->Optional[int]:
'''simple docstring'''
a_ = TFTransfoXLModelTester(self)
a_ = ConfigTester(self , config_class=a , d_embed=37)
def _lowerCAmelCase ( self: Optional[Any]) ->Tuple:
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self: str) ->str:
'''simple docstring'''
self.model_tester.set_seed()
a_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs)
def _lowerCAmelCase ( self: Tuple) ->Optional[int]:
'''simple docstring'''
self.model_tester.set_seed()
a_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs)
def _lowerCAmelCase ( self: Tuple) ->Optional[int]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs)
def _lowerCAmelCase ( self: List[Any]) ->Union[str, Any]:
'''simple docstring'''
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
a_ = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
            a_ = model_class(config)
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer)
if model_class in list_other_models_with_output_ebd:
a_ = model.get_output_embeddings()
                assert isinstance(x , tf.keras.layers.Layer)
a_ = model.get_bias()
assert name is None
else:
a_ = model.get_output_embeddings()
assert x is None
a_ = model.get_bias()
assert name is None
def _lowerCAmelCase ( self: Tuple) ->Optional[Any]:
'''simple docstring'''
pass
@slow
def _lowerCAmelCase ( self: str) ->Union[str, Any]:
'''simple docstring'''
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            a_ = TFTransfoXLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss.")
def _lowerCAmelCase ( self: Dict) ->str:
'''simple docstring'''
pass
@require_tf
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@unittest.skip("Skip test until #12651 is resolved.")
@slow
def _lowerCAmelCase ( self: int) ->Union[str, Any]:
'''simple docstring'''
a_ = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")
# fmt: off
a_ = tf.convert_to_tensor([[33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0]] , dtype=tf.intaa) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
a_ = [33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0,33,1,18_57,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,28,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
        a_ = model.generate(input_ids , max_length=2_00 , do_sample=False)
        self.assertListEqual(output_ids[0].numpy().tolist() , expected_output_ids)
| 685 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
a_ = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def _lowerCAmelCase ( self: Any , a: Path , a: Union[str, None] = None , a: Union[List[str], None] = None , a: Union[str, List[str], None] = None , a: bool = True , ) ->Optional[Any]:
'''simple docstring'''
a_ = [file for file in os.listdir(a) if os.path.isfile(os.path.join(a , a))]
if identifier is not None:
a_ = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(a , a):
for n_ in n_identifier:
a_ = [file for file in files if n_ not in file]
else:
a_ = [file for file in files if n_identifier not in file]
a_ = ignore_files or []
ignore_files.append("__init__.py")
a_ = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print("Testing" , a)
if only_modules:
a_ = file.split(".")[0]
try:
a_ = getattr(a , a)
a_ = doctest.DocTestSuite(a)
a_ = unittest.TextTestRunner().run(a)
self.assertIs(len(result.failures) , 0)
except AttributeError:
logger.info(f"""{module_identifier} is not a module.""")
else:
a_ = doctest.testfile(str(".." / directory / file) , optionflags=doctest.ELLIPSIS)
self.assertIs(result.failed , 0)
def _lowerCAmelCase ( self: Dict) ->Tuple:
'''simple docstring'''
a_ = Path("src/transformers")
a_ = "modeling"
a_ = [
"modeling_ctrl.py",
"modeling_tf_ctrl.py",
]
self.analyze_directory(a , identifier=a , ignore_files=a)
def _lowerCAmelCase ( self: int) ->Dict:
'''simple docstring'''
a_ = Path("src/transformers")
a_ = "tokenization"
self.analyze_directory(a , identifier=a)
def _lowerCAmelCase ( self: List[Any]) ->Optional[int]:
'''simple docstring'''
a_ = Path("src/transformers")
a_ = "configuration"
self.analyze_directory(a , identifier=a)
def _lowerCAmelCase ( self: Union[str, Any]) ->Any:
'''simple docstring'''
a_ = Path("src/transformers")
a_ = ["configuration", "modeling", "tokenization"]
self.analyze_directory(a , n_identifier=a)
def _lowerCAmelCase ( self: Optional[int]) ->Tuple:
'''simple docstring'''
a_ = Path("docs/source")
a_ = ["favicon.ico"]
self.analyze_directory(a , ignore_files=a , only_modules=a)
| 685 | 1 |
'''simple docstring'''
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
a_ = 'src/diffusers'
a_ = '.'
# This is to make sure the diffusers module imported is the one in the repo.
a_ = importlib.util.spec_from_file_location(
'diffusers',
os.path.join(DIFFUSERS_PATH, '__init__.py'),
submodule_search_locations=[DIFFUSERS_PATH],
)
a_ = spec.loader.load_module()
def __UpperCAmelCase (lowercase__ ,lowercase__ ) -> Any:
'''simple docstring'''
    return line.startswith(indent ) or len(line ) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$" ,line ) is not None
def __UpperCAmelCase (lowercase__ ) -> Union[str, Any]:
'''simple docstring'''
a_ = object_name.split("." )
a_ = 0
# First let's find the module where our object lives.
a_ = parts[i]
    while i < len(parts ) and not os.path.isfile(os.path.join(DIFFUSERS_PATH ,F"""{module}.py""" ) ):
        i += 1
        if i < len(parts ):
            a_ = os.path.join(module ,parts[i] )
    if i >= len(parts ):
        raise ValueError(F"""`object_name` should begin with the name of a module of diffusers but got {object_name}.""" )
    with open(os.path.join(DIFFUSERS_PATH ,F"""{module}.py""" ) ,"r" ,encoding="utf-8" ,newline="\n" ) as f:
a_ = f.readlines()
# Now let's find the class / func in the code!
a_ = ""
a_ = 0
for name in parts[i + 1 :]:
while (
            line_index < len(lines ) and re.search(rF"""^{indent}(class|def)\s+{name}(\(|\:)""" ,lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
        if line_index >= len(lines ):
raise ValueError(F""" {object_name} does not match any function or class in {module}.""" )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
a_ = line_index
    while line_index < len(lines ) and _should_continue(lines[line_index] ,indent ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
a_ = lines[start_index:line_index]
return "".join(lowercase__ )
a_ = re.compile(r'^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)')
a_ = re.compile(r'^\s*(\S+)->(\S+)(\s+.*|$)')
a_ = re.compile(r'<FILL\s+[^>]*>')
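# Illustration of the annotations these patterns match (BasicTransformerBlock is
# an illustrative target; the replace clause here is hypothetical):
#
#     # Copied from diffusers.models.attention.BasicTransformerBlock
#     # Copied from diffusers.models.attention.BasicTransformerBlock with BasicTransformerBlock->MyBlock
#
# _re_copy_warning captures the dotted target plus any trailing replace clause,
# _re_replace_pattern parses each `old->new` pair out of that clause, and
# _re_fill_pattern matches `<FILL ...>` placeholders.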
def __UpperCAmelCase (lowercase__ ) -> str:
'''simple docstring'''
a_ = code.split("\n" )
a_ = 0
    while idx < len(lines ) and len(lines[idx] ) == 0:
idx += 1
    if idx < len(lines ):
return re.search(r"^(\s*)\S" ,lines[idx] ).groups()[0]
return ""
def __UpperCAmelCase (lowercase__ ) -> str:
'''simple docstring'''
a_ = len(get_indent(lowercase__ ) ) > 0
if has_indent:
a_ = F"""class Bla:\n{code}"""
    a_ = black.Mode(target_versions={black.TargetVersion.PYaa} ,line_length=119 ,preview=True )
    a_ = black.format_str(code ,mode=mode )
    a_ , a_ = style_docstrings_in_code(result )
return result[len("class Bla:\n" ) :] if has_indent else result
def __UpperCAmelCase (lowercase__ ,lowercase__=False ) -> List[Any]:
'''simple docstring'''
with open(lowercase__ ,"r" ,encoding="utf-8" ,newline="\n" ) as f:
a_ = f.readlines()
a_ = []
a_ = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines ):
a_ = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
a_ , a_ , a_ = search.groups()
        a_ = find_code_in_diffusers(object_name )
        a_ = get_indent(theoretical_code )
a_ = line_index + 1 if indent == theoretical_indent else line_index + 2
a_ = theoretical_indent
a_ = start_index
# Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
a_ = True
        while line_index < len(lines ) and should_continue:
            line_index += 1
            if line_index >= len(lines ):
                break
            a_ = lines[line_index]
            a_ = _should_continue(line ,indent ) and re.search(F"""^{indent}# End copy""" ,line ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
a_ = lines[start_index:line_index]
a_ = "".join(lowercase__ )
# Remove any nested `Copied from` comments to avoid circular copies
        a_ = [line for line in theoretical_code.split("\n" ) if _re_copy_warning.search(line ) is None]
        a_ = "\n".join(theoretical_code )
# Before comparing, use the `replace_pattern` on the original code.
if len(lowercase__ ) > 0:
a_ = replace_pattern.replace("with" ,"" ).split("," )
            a_ = [_re_replace_pattern.search(p ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
a_ , a_ , a_ = pattern.groups()
                a_ = re.sub(obja ,obja ,theoretical_code )
                if option.strip() == "all-casing":
                    a_ = re.sub(obja.lower() ,obja.lower() ,theoretical_code )
                    a_ = re.sub(obja.upper() ,obja.upper() ,theoretical_code )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
a_ = blackify(lines[start_index - 1] + theoretical_code )
a_ = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
a_ = lines[:start_index] + [theoretical_code] + lines[line_index:]
a_ = start_index + 1
    if overwrite and len(diffs ) > 0:
# Warn the user a file has been modified.
print(F"""Detected changes, rewriting {filename}.""" )
with open(lowercase__ ,"w" ,encoding="utf-8" ,newline="\n" ) as f:
f.writelines(lowercase__ )
return diffs
def __UpperCAmelCase (lowercase__ = False ) -> str:
'''simple docstring'''
    a_ = glob.glob(os.path.join(DIFFUSERS_PATH ,"**/*.py" ) ,recursive=True )
a_ = []
for filename in all_files:
        a_ = is_copy_consistent(filename ,overwrite )
        diffs += [F"""- {filename}: copy does not match {d[0]} at line {d[1]}""" for d in new_diffs]
    if not overwrite and len(diffs ) > 0:
        a_ = "\n".join(diffs )
raise Exception(
"Found the following copy inconsistencies:\n"
+ diff
+ "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them." )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
a_ = parser.parse_args()
check_copies(args.fix_and_overwrite)
| 685 |
'''simple docstring'''
def __UpperCAmelCase (lowercase__ = 100 ) -> int:
'''simple docstring'''
a_ = n * (n + 1) * (2 * n + 1) / 6
a_ = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(F'{solution() = }')
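# Worked check of the two closed forms above, for n = 10 (Project Euler's
# published example): sum of squares = 10 * 11 * 21 / 6 = 385, square of sum =
# (10 * 11 / 2) ** 2 = 3025, so the difference is 3025 - 385 = 2640.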
| 685 | 1 |
'''simple docstring'''
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
a_ = 'true'
def __UpperCAmelCase (lowercase__ ,lowercase__=82 ,lowercase__=16 ) -> Dict:
'''simple docstring'''
set_seed(42 )
a_ = RegressionModel()
a_ = deepcopy(lowercase__ )
a_ = RegressionDataset(length=lowercase__ )
a_ = DataLoader(lowercase__ ,batch_size=lowercase__ )
model.to(accelerator.device )
a_ , a_ = accelerator.prepare(lowercase__ ,lowercase__ )
return model, ddp_model, dataloader
def __UpperCAmelCase (lowercase__ ,lowercase__=False ) -> int:
'''simple docstring'''
a_ = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased" )
a_ = load_dataset("glue" ,"mrpc" ,split="validation" )
def tokenize_function(lowercase__ ):
a_ = tokenizer(examples["sentence1"] ,examples["sentence2"] ,truncation=lowercase__ ,max_length=lowercase__ )
return outputs
with accelerator.main_process_first():
a_ = dataset.map(
lowercase__ ,batched=lowercase__ ,remove_columns=["idx", "sentence1", "sentence2"] ,)
a_ = tokenized_datasets.rename_column("label" ,"labels" )
def collate_fn(lowercase__ ):
if use_longest:
return tokenizer.pad(lowercase__ ,padding="longest" ,return_tensors="pt" )
return tokenizer.pad(lowercase__ ,padding="max_length" ,max_length=128 ,return_tensors="pt" )
return DataLoader(lowercase__ ,shuffle=lowercase__ ,collate_fn=lowercase__ ,batch_size=16 )
def __UpperCAmelCase (lowercase__ ,lowercase__ ) -> Union[str, Any]:
'''simple docstring'''
a_ = Accelerator(dispatch_batches=lowercase__ ,split_batches=lowercase__ )
a_ = get_dataloader(lowercase__ ,not dispatch_batches )
a_ = AutoModelForSequenceClassification.from_pretrained(
"hf-internal-testing/mrpc-bert-base-cased" ,return_dict=lowercase__ )
a_ , a_ = accelerator.prepare(lowercase__ ,lowercase__ )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ) -> int:
'''simple docstring'''
a_ = []
for batch in dataloader:
a_ , a_ = batch.values()
with torch.no_grad():
a_ = model(lowercase__ )
a_ , a_ = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
a_ , a_ = [], []
for logit, targ in logits_and_targets:
logits.append(lowercase__ )
targs.append(lowercase__ )
a_ , a_ = torch.cat(lowercase__ ), torch.cat(lowercase__ )
return logits, targs
def __UpperCAmelCase (lowercase__ ,lowercase__=82 ,lowercase__=False ,lowercase__=False ,lowercase__=16 ) -> int:
'''simple docstring'''
a_ , a_ , a_ = get_basic_setup(lowercase__ ,lowercase__ ,lowercase__ )
a_ , a_ = generate_predictions(lowercase__ ,lowercase__ ,lowercase__ )
assert (
len(lowercase__ ) == num_samples
), F"""Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(lowercase__ )}"""
def __UpperCAmelCase (lowercase__ = False ,lowercase__ = False ) -> Optional[int]:
'''simple docstring'''
a_ = evaluate.load("glue" ,"mrpc" )
a_ , a_ = get_mrpc_setup(lowercase__ ,lowercase__ )
# First do baseline
a_ , a_ , a_ = setup["no"]
model.to(lowercase__ )
model.eval()
for batch in dataloader:
batch.to(lowercase__ )
with torch.inference_mode():
a_ = model(**lowercase__ )
a_ = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=lowercase__ ,references=batch["labels"] )
a_ = metric.compute()
# Then do distributed
a_ , a_ , a_ = setup["ddp"]
model.eval()
for batch in dataloader:
with torch.inference_mode():
a_ = model(**lowercase__ )
a_ = outputs.logits.argmax(dim=-1 )
a_ = batch["labels"]
a_ , a_ = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=lowercase__ ,references=lowercase__ )
a_ = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] ,distributed[key] ), F"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"""
def __UpperCAmelCase () -> Dict:
'''simple docstring'''
    a_ = Accelerator(split_batches=False ,dispatch_batches=False )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print("**Testing gather_for_metrics**" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""" )
                test_mrpc(dispatch_batches ,split_batches )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("**Test torch metrics**" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
            a_ = Accelerator(split_batches=split_batches ,dispatch_batches=dispatch_batches )
if accelerator.is_local_main_process:
print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""" )
            test_torch_metrics(accelerator ,99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("**Test last batch is not dropped when perfectly divisible**" )
a_ = Accelerator()
    test_torch_metrics(accelerator ,512 )
accelerator.state._reset_state()
def __UpperCAmelCase (lowercase__ ) -> Union[str, Any]:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
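# What gather_for_metrics() guards against, in numbers (a hypothetical 2-process
# run, matching the length=99 case exercised above): the distributed sampler pads
# 99 samples to 100 so each rank sees 50; a plain gather() would therefore return
# 100 predictions, while gather_for_metrics() drops the duplicated tail so exactly
# 99 reach metric.add_batch and the length assertion in test_torch_metrics.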
| 685 |
'''simple docstring'''
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_UpperCAmelCase =(PNDMScheduler,)
_UpperCAmelCase =(('''num_inference_steps''', 50),)
def _lowerCAmelCase ( self: int , **a: Optional[int]) ->Any:
'''simple docstring'''
a_ = {
"num_train_timesteps": 10_00,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**a)
return config
def _lowerCAmelCase ( self: Any , a: Tuple=0 , **a: Any) ->Any:
'''simple docstring'''
a_ = dict(self.forward_default_kwargs)
a_ = kwargs.pop("num_inference_steps" , a)
a_ = self.dummy_sample
a_ = 0.1 * sample
a_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
a_ = self.get_scheduler_config(**a)
a_ = scheduler_class(**a)
scheduler.set_timesteps(a)
# copy over dummy past residuals
a_ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a)
a_ = scheduler_class.from_pretrained(a)
new_scheduler.set_timesteps(a)
# copy over dummy past residuals
a_ = dummy_past_residuals[:]
a_ = scheduler.step_prk(a , a , a , **a).prev_sample
a_ = new_scheduler.step_prk(a , a , a , **a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
a_ = scheduler.step_plms(a , a , a , **a).prev_sample
a_ = new_scheduler.step_plms(a , a , a , **a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def _lowerCAmelCase ( self: str) ->Any:
'''simple docstring'''
pass
def _lowerCAmelCase ( self: Union[str, Any] , a: str=0 , **a: Union[str, Any]) ->Tuple:
'''simple docstring'''
a_ = dict(self.forward_default_kwargs)
a_ = kwargs.pop("num_inference_steps" , a)
a_ = self.dummy_sample
a_ = 0.1 * sample
a_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
a_ = self.get_scheduler_config()
a_ = scheduler_class(**a)
scheduler.set_timesteps(a)
# copy over dummy past residuals (must be after setting timesteps)
a_ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a)
a_ = scheduler_class.from_pretrained(a)
# copy over dummy past residuals
new_scheduler.set_timesteps(a)
# copy over dummy past residual (must be after setting timesteps)
a_ = dummy_past_residuals[:]
a_ = scheduler.step_prk(a , a , a , **a).prev_sample
a_ = new_scheduler.step_prk(a , a , a , **a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
a_ = scheduler.step_plms(a , a , a , **a).prev_sample
a_ = new_scheduler.step_plms(a , a , a , **a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def _lowerCAmelCase ( self: Dict , **a: int) ->Any:
'''simple docstring'''
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config(**a)
a_ = scheduler_class(**a)
a_ = 10
a_ = self.dummy_model()
a_ = self.dummy_sample_deter
scheduler.set_timesteps(a)
for i, t in enumerate(scheduler.prk_timesteps):
a_ = model(a , a)
a_ = scheduler.step_prk(a , a , a).prev_sample
for i, t in enumerate(scheduler.plms_timesteps):
a_ = model(a , a)
a_ = scheduler.step_plms(a , a , a).prev_sample
return sample
def _lowerCAmelCase ( self: int) ->int:
'''simple docstring'''
a_ = dict(self.forward_default_kwargs)
a_ = kwargs.pop("num_inference_steps" , a)
for scheduler_class in self.scheduler_classes:
a_ = self.get_scheduler_config()
a_ = scheduler_class(**a)
a_ = self.dummy_sample
a_ = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler , "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler , "set_timesteps"):
a_ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
a_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
a_ = dummy_past_residuals[:]
            a_ = scheduler.step_prk(residual , 0 , sample , **kwargs).prev_sample
            a_ = scheduler.step_prk(residual , 1 , sample , **kwargs).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
            a_ = scheduler.step_plms(residual , 0 , sample , **kwargs).prev_sample
            a_ = scheduler.step_plms(residual , 1 , sample , **kwargs).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
def _lowerCAmelCase ( self: Dict) ->List[Any]:
'''simple docstring'''
for timesteps in [1_00, 10_00]:
            self.check_over_configs(num_train_timesteps=timesteps)
def _lowerCAmelCase ( self: Optional[int]) ->List[Any]:
'''simple docstring'''
for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config(steps_offset=1)
a_ = scheduler_class(**a)
scheduler.set_timesteps(10)
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[9_01, 8_51, 8_51, 8_01, 8_01, 7_51, 7_51, 7_01, 7_01, 6_51, 6_51, 6_01, 6_01, 5_01, 4_01, 3_01, 2_01, 1_01, 1]) , )
def _lowerCAmelCase ( self: Tuple) ->Optional[Any]:
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end)
def _lowerCAmelCase ( self: int) ->Tuple:
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)
def _lowerCAmelCase ( self: Optional[int]) ->List[Any]:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
def _lowerCAmelCase ( self: Tuple) ->Optional[Any]:
'''simple docstring'''
for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)
def _lowerCAmelCase ( self: str) ->List[str]:
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00]):
            self.check_over_forward(num_inference_steps=num_inference_steps)
def _lowerCAmelCase ( self: Dict) ->Union[str, Any]:
'''simple docstring'''
a_ = 27
for scheduler_class in self.scheduler_classes:
a_ = self.dummy_sample
a_ = 0.1 * sample
a_ = self.get_scheduler_config()
a_ = scheduler_class(**a)
scheduler.set_timesteps(a)
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2]):
                a_ = scheduler.step_prk(residual , t , sample).prev_sample
def _lowerCAmelCase ( self: Optional[Any]) ->Dict:
'''simple docstring'''
        with self.assertRaises(ValueError):
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config()
a_ = scheduler_class(**a)
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample).prev_sample
def _lowerCAmelCase ( self: Optional[int]) ->Union[str, Any]:
'''simple docstring'''
a_ = self.full_loop()
        a_ = torch.sum(torch.abs(sample))
        a_ = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 198.1318) < 1e-2
assert abs(result_mean.item() - 0.2580) < 1e-3
def _lowerCAmelCase ( self: Optional[int]) ->int:
'''simple docstring'''
a_ = self.full_loop(prediction_type="v_prediction")
        a_ = torch.sum(torch.abs(sample))
        a_ = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 67.3986) < 1e-2
assert abs(result_mean.item() - 0.0878) < 1e-3
def _lowerCAmelCase ( self: int) ->Optional[Any]:
'''simple docstring'''
        a_ = self.full_loop(set_alpha_to_one=True , beta_start=0.01)
        a_ = torch.sum(torch.abs(sample))
        a_ = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 230.0399) < 1e-2
assert abs(result_mean.item() - 0.2995) < 1e-3
def _lowerCAmelCase ( self: List[str]) ->Any:
'''simple docstring'''
        a_ = self.full_loop(set_alpha_to_one=False , beta_start=0.01)
        a_ = torch.sum(torch.abs(sample))
        a_ = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 186.9482) < 1e-2
assert abs(result_mean.item() - 0.2434) < 1e-3
| 685 | 1 |
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def __UpperCAmelCase () -> int:
'''simple docstring'''
a_ = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
a_ = Image.open(requests.get(lowercase__ ,stream=lowercase__ ).raw ).convert("RGB" )
return image
def __UpperCAmelCase (lowercase__ ) -> Union[str, Any]:
'''simple docstring'''
a_ = []
# fmt: off
# vision encoder
rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding") )
rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding") )
rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight") )
rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias") )
rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight") )
rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.weight""", F"""vision_model.encoder.layers.{i}.layer_norm1.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.bias""", F"""vision_model.encoder.layers.{i}.layer_norm1.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.weight""", F"""vision_model.encoder.layers.{i}.layer_norm2.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.bias""", F"""vision_model.encoder.layers.{i}.layer_norm2.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.qkv.weight""", F"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.weight""", F"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.bias""", F"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") )
# QFormer
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.layernorm.weight") )
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.layernorm.bias") )
# fmt: on
return rename_keys
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ) -> List[str]:
'''simple docstring'''
a_ = dct.pop(lowercase__ )
a_ = val
def __UpperCAmelCase (lowercase__ ,lowercase__ ) -> Tuple:
'''simple docstring'''
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
a_ = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.q_bias""" )
a_ = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.v_bias""" )
# next, set bias in the state dict
a_ = torch.cat((q_bias, torch.zeros_like(lowercase__ ,requires_grad=lowercase__ ), v_bias) )
a_ = qkv_bias
def __UpperCAmelCase (lowercase__ ,lowercase__ ) -> Optional[Any]:
'''simple docstring'''
a_ = 364 if "coco" in model_name else 224
a_ = BlipaVisionConfig(image_size=lowercase__ ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
a_ = OPTConfig.from_pretrained("facebook/opt-2.7b" ,eos_token_id=lowercase__ ).to_dict()
elif "opt-6.7b" in model_name:
a_ = OPTConfig.from_pretrained("facebook/opt-6.7b" ,eos_token_id=lowercase__ ).to_dict()
elif "t5-xl" in model_name:
a_ = TaConfig.from_pretrained("google/flan-t5-xl" ,dense_act_fn="gelu" ,bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
a_ = TaConfig.from_pretrained("google/flan-t5-xxl" ,dense_act_fn="gelu" ,bos_token_id=1 ).to_dict()
a_ = BlipaConfig(vision_config=lowercase__ ,text_config=lowercase__ )
return config, image_size
@torch.no_grad()
def __UpperCAmelCase (lowercase__ ,lowercase__=None ,lowercase__=False ) -> Tuple:
'''simple docstring'''
a_ = (
AutoTokenizer.from_pretrained("facebook/opt-2.7b" )
if "opt" in model_name
else AutoTokenizer.from_pretrained("google/flan-t5-xl" )
)
a_ = tokenizer("\n" ,add_special_tokens=lowercase__ ).input_ids[0]
a_ , a_ = get_blipa_config(lowercase__ ,eos_token_id=lowercase__ )
a_ = BlipaForConditionalGeneration(lowercase__ ).eval()
a_ = {
"blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
"blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
"blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
"blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
"blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
"blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
"blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
}
a_ , a_ = model_name_to_original[model_name]
# load original model
print("Loading original model..." )
a_ = "cuda" if torch.cuda.is_available() else "cpu"
a_ , a_ , a_ = load_model_and_preprocess(
name=lowercase__ ,model_type=lowercase__ ,is_eval=lowercase__ ,device=lowercase__ )
original_model.eval()
print("Done!" )
# update state dict keys
a_ = original_model.state_dict()
a_ = create_rename_keys(lowercase__ )
for src, dest in rename_keys:
rename_key(lowercase__ ,lowercase__ ,lowercase__ )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
        a_ = state_dict.pop(key )
if key.startswith("Qformer.bert" ):
a_ = key.replace("Qformer.bert" ,"qformer" )
if "attention.self" in key:
a_ = key.replace("self" ,"attention" )
if "opt_proj" in key:
a_ = key.replace("opt_proj" ,"language_projection" )
if "t5_proj" in key:
a_ = key.replace("t5_proj" ,"language_projection" )
if key.startswith("opt" ):
a_ = key.replace("opt" ,"language" )
if key.startswith("t5" ):
a_ = key.replace("t5" ,"language" )
a_ = val
# read in qv biases
read_in_q_v_bias(lowercase__ ,lowercase__ )
a_ , a_ = hf_model.load_state_dict(lowercase__ ,strict=lowercase__ )
assert len(lowercase__ ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
a_ = load_demo_image()
a_ = vis_processors["eval"](lowercase__ ).unsqueeze(0 ).to(lowercase__ )
a_ = tokenizer(["\n"] ,return_tensors="pt" ).input_ids.to(lowercase__ )
# create processor
a_ = BlipImageProcessor(
size={"height": image_size, "width": image_size} ,image_mean=lowercase__ ,image_std=lowercase__ )
a_ = BlipaProcessor(image_processor=lowercase__ ,tokenizer=lowercase__ )
a_ = processor(images=lowercase__ ,return_tensors="pt" ).pixel_values.to(lowercase__ )
# make sure processor creates exact same pixel values
assert torch.allclose(lowercase__ ,lowercase__ )
original_model.to(lowercase__ )
hf_model.to(lowercase__ )
with torch.no_grad():
if "opt" in model_name:
a_ = original_model({"image": original_pixel_values, "text_input": [""]} ).logits
a_ = hf_model(lowercase__ ,lowercase__ ).logits
else:
a_ = original_model(
{"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]} ).logits
a_ = input_ids.masked_fill(input_ids == tokenizer.pad_token_id ,-100 )
a_ = hf_model(lowercase__ ,lowercase__ ,labels=lowercase__ ).logits
assert original_logits.shape == logits.shape
print("First values of original logits:" ,original_logits[0, :3, :3] )
print("First values of HF logits:" ,logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
a_ = torch.tensor(
[[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] ,device=lowercase__ )
assert torch.allclose(logits[0, :3, :3] ,lowercase__ ,atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
a_ = torch.tensor(
[[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] ,device=lowercase__ )
else:
# cast to same type
a_ = logits.dtype
assert torch.allclose(original_logits.to(lowercase__ ) ,lowercase__ ,atol=1e-2 )
print("Looks ok!" )
print("Generating a caption..." )
a_ = ""
a_ = tokenizer(lowercase__ ,return_tensors="pt" ).input_ids.to(lowercase__ )
a_ = original_model.generate({"image": original_pixel_values} )
a_ = hf_model.generate(
lowercase__ ,lowercase__ ,do_sample=lowercase__ ,num_beams=5 ,max_length=30 ,min_length=1 ,top_p=0.9 ,repetition_penalty=1.0 ,length_penalty=1.0 ,temperature=1 ,)
print("Original generation:" ,lowercase__ )
a_ = input_ids.shape[1]
a_ = processor.batch_decode(outputs[:, prompt_length:] ,skip_special_tokens=lowercase__ )
a_ = [text.strip() for text in output_text]
print("HF generation:" ,lowercase__ )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(lowercase__ )
hf_model.save_pretrained(lowercase__ )
if push_to_hub:
processor.push_to_hub(F"""nielsr/{model_name}""" )
hf_model.push_to_hub(F"""nielsr/{model_name}""" )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
a_ = [
'blip2-opt-2.7b',
'blip2-opt-6.7b',
'blip2-opt-2.7b-coco',
'blip2-opt-6.7b-coco',
'blip2-flan-t5-xl',
'blip2-flan-t5-xl-coco',
'blip2-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='blip2-opt-2.7b',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
a_ = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
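# Example invocation (hypothetical script and output paths; the flags are the
# ones defined by the argparse block above):
#
#     python convert_blip_2_original_to_pytorch.py \
#         --model_name blip2-opt-2.7b \
#         --pytorch_dump_folder_path ./blip2-opt-2.7b \
#         --push_to_hub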
| 685 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def _lowerCAmelCase ( self: Optional[int]) ->Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
def _lowerCAmelCase ( self: str) ->Optional[int]:
'''simple docstring'''
a_ , a_ = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-canny" , from_pt=a , dtype=jnp.bfloataa)
a_ , a_ = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , controlnet=a , from_pt=a , dtype=jnp.bfloataa)
a_ = controlnet_params
a_ = "bird"
a_ = jax.device_count()
a_ = pipe.prepare_text_inputs([prompts] * num_samples)
a_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png")
a_ = pipe.prepare_image_inputs([canny_image] * num_samples)
a_ = jax.random.PRNGKey(0)
a_ = jax.random.split(a , jax.device_count())
a_ = replicate(a)
a_ = shard(a)
a_ = shard(a)
a_ = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=50 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
a_ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
a_ = images[0, 2_53:2_56, 2_53:2_56, -1]
a_ = jnp.asarray(jax.device_get(image_slice.flatten()))
a_ = jnp.array(
[0.16_7969, 0.11_6699, 0.08_1543, 0.15_4297, 0.13_2812, 0.10_8887, 0.16_9922, 0.16_9922, 0.20_5078])
print(f"""output_slice: {output_slice}""")
assert jnp.abs(output_slice - expected_slice).max() < 1e-2
def _lowerCAmelCase ( self: Union[str, Any]) ->str:
'''simple docstring'''
a_ , a_ = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-openpose" , from_pt=a , dtype=jnp.bfloataa)
a_ , a_ = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , controlnet=a , from_pt=a , dtype=jnp.bfloataa)
a_ = controlnet_params
a_ = "Chef in the kitchen"
a_ = jax.device_count()
a_ = pipe.prepare_text_inputs([prompts] * num_samples)
a_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png")
a_ = pipe.prepare_image_inputs([pose_image] * num_samples)
a_ = jax.random.PRNGKey(0)
a_ = jax.random.split(a , jax.device_count())
a_ = replicate(a)
a_ = shard(a)
a_ = shard(a)
a_ = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=50 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
a_ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
a_ = images[0, 2_53:2_56, 2_53:2_56, -1]
a_ = jnp.asarray(jax.device_get(image_slice.flatten()))
a_ = jnp.array(
[[0.27_1484, 0.26_1719, 0.27_5391, 0.27_7344, 0.27_9297, 0.29_1016, 0.29_4922, 0.30_2734, 0.30_2734]])
print(f"""output_slice: {output_slice}""")
assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 685 | 1 |
'''simple docstring'''
def __UpperCAmelCase (lowercase__ = 100 ) -> int:
'''simple docstring'''
a_ = n * (n + 1) * (2 * n + 1) / 6
a_ = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(F'{solution() = }')
| 685 |
'''simple docstring'''
def __UpperCAmelCase (lowercase__ = 1000 ) -> int:
'''simple docstring'''
return sum(e for e in range(3 ,lowercase__ ) if e % 3 == 0 or e % 5 == 0 )
if __name__ == "__main__":
print(F'{solution() = }')
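# The same sum has an O(1) closed form via inclusion-exclusion over arithmetic
# series: multiples of 3, plus multiples of 5, minus multiples of 15.
# A minimal sketch (series_sum is a helper introduced here for illustration):
def series_sum(k: int, limit: int) -> int:
    m = (limit - 1) // k  # positive multiples of k strictly below `limit`
    return k * m * (m + 1) // 2

assert series_sum(3, 1000) + series_sum(5, 1000) - series_sum(15, 1000) == 233168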
| 685 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
a_ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
def __init__( self: List[Any] , *a: str , **a: Tuple) ->None:
'''simple docstring'''
warnings.warn(
"The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use DonutImageProcessor instead." , a , )
super().__init__(*a , **a)
| 685 |
'''simple docstring'''
import math
def __UpperCAmelCase (lowercase__ ) -> list:
'''simple docstring'''
a_ = [True] * n
a_ = False
a_ = False
a_ = True
for i in range(3 ,int(n**0.5 + 1 ) ,2 ):
a_ = i * 2
while index < n:
a_ = False
a_ = index + i
a_ = [2]
for i in range(3 ,lowercase__ ,2 ):
if is_prime[i]:
primes.append(lowercase__ )
return primes
def __UpperCAmelCase (lowercase__ = 999966663333 ) -> int:
'''simple docstring'''
a_ = math.floor(math.sqrt(lowercase__ ) ) + 100
a_ = prime_sieve(lowercase__ )
a_ = 0
a_ = 0
a_ = primes[prime_index]
while (last_prime**2) <= limit:
a_ = primes[prime_index + 1]
a_ = last_prime**2
a_ = next_prime**2
# Get numbers divisible by lps(current)
a_ = lower_bound + last_prime
while upper_bound > current <= limit:
matches_sum += current
current += last_prime
# Reset the upper_bound
while (upper_bound - next_prime) > limit:
upper_bound -= next_prime
# Add the numbers divisible by ups(current)
a_ = upper_bound - next_prime
while current > lower_bound:
matches_sum += current
current -= next_prime
# Remove the numbers divisible by both ups and lps
a_ = 0
while upper_bound > current <= limit:
if current <= lower_bound:
# Increment the current number
current += last_prime * next_prime
continue
if current > limit:
break
# Remove twice since it was added by both ups and lps
matches_sum -= current * 2
# Increment the current number
current += last_prime * next_prime
# Setup for next pair
a_ = next_prime
prime_index += 1
return matches_sum
if __name__ == "__main__":
print(solution())
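# Spot check of the sieve above (flags are marked along every multiple, but only
# odd indices, plus the seeded 2, are ever read back):
#     prime_sieve(30) -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]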
| 685 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'uclanlp/visualbert-vqa': 'https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json',
'uclanlp/visualbert-vqa-pre': 'https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json',
'uclanlp/visualbert-vqa-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-vcr': 'https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json',
'uclanlp/visualbert-vcr-pre': 'https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json',
'uclanlp/visualbert-vcr-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-nlvr2': 'https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-pre': 'https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_UpperCAmelCase ='''visual_bert'''
def __init__( self: Union[str, Any] , a: List[Any]=3_05_22 , a: List[Any]=7_68 , a: Union[str, Any]=5_12 , a: List[str]=12 , a: Tuple=12 , a: Optional[Any]=30_72 , a: int="gelu" , a: Union[str, Any]=0.1 , a: int=0.1 , a: str=5_12 , a: Optional[int]=2 , a: List[str]=0.02 , a: Optional[int]=1e-12 , a: str=False , a: Any=True , a: Tuple=1 , a: Dict=0 , a: Any=2 , **a: Optional[Any] , ) ->str:
'''simple docstring'''
super().__init__(pad_token_id=a , bos_token_id=a , eos_token_id=a , **a)
a_ = vocab_size
a_ = max_position_embeddings
a_ = hidden_size
a_ = visual_embedding_dim
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = initializer_range
a_ = type_vocab_size
a_ = layer_norm_eps
a_ = bypass_transformer
a_ = special_visual_initialize
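# Editorial note (read off the defaults above): the text stream is sized like
# BERT-base (768 hidden units, 12 layers, 12 heads, 3072 intermediate) with a
# 512-dimensional visual embedding; the archive map at the top lists the
# matching uclanlp/visualbert-* checkpoints.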
| 685 |
'''simple docstring'''
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ) -> Any:
'''simple docstring'''
a_ = UniSpeechSatForSequenceClassification.from_pretrained(lowercase__ ,config=lowercase__ )
a_ = downstream_dict["projector.weight"]
a_ = downstream_dict["projector.bias"]
a_ = downstream_dict["model.post_net.linear.weight"]
a_ = downstream_dict["model.post_net.linear.bias"]
return model
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ) -> Dict:
'''simple docstring'''
a_ = UniSpeechSatForAudioFrameClassification.from_pretrained(lowercase__ ,config=lowercase__ )
a_ = downstream_dict["model.linear.weight"]
a_ = downstream_dict["model.linear.bias"]
return model
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ) -> Optional[Any]:
'''simple docstring'''
a_ = UniSpeechSatForXVector.from_pretrained(lowercase__ ,config=lowercase__ )
a_ = downstream_dict["connector.weight"]
a_ = downstream_dict["connector.bias"]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
a_ = downstream_dict[
F"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
]
a_ = downstream_dict[F"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]
a_ = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
a_ = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
a_ = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
a_ = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
a_ = downstream_dict["objective.W"]
return model
@torch.no_grad()
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ) -> List[str]:
'''simple docstring'''
a_ = torch.load(lowercase__ ,map_location="cpu" )
a_ = checkpoint["Downstream"]
a_ = UniSpeechSatConfig.from_pretrained(lowercase__ )
a_ = WavaVecaFeatureExtractor.from_pretrained(
lowercase__ ,return_attention_mask=lowercase__ ,do_normalize=lowercase__ )
a_ = hf_config.architectures[0]
if arch.endswith("ForSequenceClassification" ):
a_ = convert_classification(lowercase__ ,lowercase__ ,lowercase__ )
elif arch.endswith("ForAudioFrameClassification" ):
a_ = convert_diarization(lowercase__ ,lowercase__ ,lowercase__ )
elif arch.endswith("ForXVector" ):
a_ = convert_xvector(lowercase__ ,lowercase__ ,lowercase__ )
else:
raise NotImplementedError(F"""S3PRL weights conversion is not supported for {arch}""" )
if hf_config.use_weighted_layer_sum:
a_ = checkpoint["Featurizer"]["weights"]
hf_feature_extractor.save_pretrained(lowercase__ )
hf_model.save_pretrained(lowercase__ )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
a_ = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
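# Example invocation (script name and paths are hypothetical; the flags match
# the argparse definitions above):
#   python convert_unispeech_sat_s3prl_checkpoint.py \
#       --base_model_name microsoft/unispeech-sat-base \
#       --config_path ./config.json \
#       --checkpoint_path ./s3prl_checkpoint.pt \
#       --model_dump_path ./converted_model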
| 685 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Generator
def __UpperCAmelCase () -> Generator[int, None, None]:
'''simple docstring'''
a_ = {}
a_ = 2
while True:
a_ = factor_map.pop(lowercase__ ,None )
if factor:
a_ = factor + prime
while x in factor_map:
x += factor
a_ = factor
else:
a_ = prime
yield prime
prime += 1
def __UpperCAmelCase (lowercase__ = 1e10 ) -> int:
'''simple docstring'''
a_ = sieve()
a_ = 1
while True:
a_ = next(lowercase__ )
if (2 * prime * n) > limit:
return n
# Ignore the next prime as the remainder will be 2.
next(lowercase__ )
n += 2
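# Background (editorial): for the n-th prime p with n odd, the binomial
# expansion gives (p - 1)**n + (p + 1)**n ≡ 2*n*p (mod p**2), so the remainder
# first exceeds the limit once 2*n*p > limit, which is the test above. For
# even n the remainder is just 2, hence every other prime is skipped.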
if __name__ == "__main__":
print(solution())
| 685 |
'''simple docstring'''
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
from .unet_ad import UNetaDModel
from .unet_ad_condition import UNetaDConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL
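# Editorial note: the is_torch_available()/is_flax_available() guards above
# keep this package importable on installs that ship only one backend; each
# model class is re-exported only when its framework is present.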
| 685 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ) -> tuple:
'''simple docstring'''
a_ = namedtuple("result" ,"name value" )
if (voltage, current, power).count(0 ) != 1:
raise ValueError("Only one argument must be 0" )
elif power < 0:
raise ValueError(
"Power cannot be negative in any electrical/electronics system" )
elif voltage == 0:
return result("voltage" ,power / current )
elif current == 0:
return result("current" ,power / voltage )
elif power == 0:
return result("power" ,float(round(abs(voltage * current ) ,2 ) ) )
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
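# Usage sketch (assumed positional call): passing voltage=0, current=2,
# power=4 solves for the missing quantity and returns
# result(name='voltage', value=2.0).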
| 685 |
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
a_ = logging.get_logger(__name__)
a_ = {'vocab_file': 'vocab.txt', 'emoji_file': 'emoji.json'}
a_ = {
'vocab_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt',
},
'emoji_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json',
},
}
a_ = {
'abeja/gpt-neox-japanese-2.7b': 2_048,
}
def __UpperCAmelCase (lowercase__ ,lowercase__ ) -> Tuple:
'''simple docstring'''
with open(lowercase__ ,"r" ,encoding="utf-8" ) as f:
a_ = json.loads(f.read() )
a_ = collections.OrderedDict()
a_ = collections.OrderedDict()
a_ = collections.OrderedDict()
with open(lowercase__ ,"r" ,encoding="utf-8" ) as f:
a_ = f.readlines()
a_ = [[t.rstrip("\n" )] if (t == "," or "," not in t) else t.rstrip("\n" ).split("," ) for t in token]
for idx, b in enumerate(lowercase__ ):
a_ = b
a_ = idx
for wd in b:
a_ = idx
return vocab, raw_vocab, ids_to_tokens, emoji
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_UpperCAmelCase =VOCAB_FILES_NAMES
_UpperCAmelCase =PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase =['''input_ids''', '''attention_mask''']
def __init__( self: List[str] , a: Union[str, Any] , a: Optional[int] , a: List[str]="<|endoftext|>" , a: Union[str, Any]="<|endoftext|>" , a: Dict="<|startoftext|>" , a: Dict="<|endoftext|>" , a: Union[str, Any]=False , **a: Optional[int] , ) ->str:
'''simple docstring'''
super().__init__(
unk_token=a , pad_token=a , bos_token=a , eos_token=a , do_clean_text=a , **a , )
if not os.path.isfile(a):
raise ValueError(
f"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
" model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
if not os.path.isfile(a):
raise ValueError(
f"""Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
" pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
a_ = do_clean_text
a_ , a_ , a_ , a_ = load_vocab_and_emoji(a , a)
a_ = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji)
@property
def _lowerCAmelCase ( self: Optional[Any]) ->Optional[Any]:
'''simple docstring'''
return len(self.raw_vocab)
def _lowerCAmelCase ( self: Dict) ->Any:
'''simple docstring'''
return dict(self.raw_vocab , **self.added_tokens_encoder)
def _lowerCAmelCase ( self: Union[str, Any] , a: Any) ->Dict:
'''simple docstring'''
return self.subword_tokenizer.tokenize(a , clean=self.do_clean_text)
def _lowerCAmelCase ( self: int , a: List[Any]) ->Union[str, Any]:
'''simple docstring'''
return self.vocab.get(a , self.vocab.get(self.unk_token))
def _lowerCAmelCase ( self: Optional[Any] , a: Optional[int]) ->str:
'''simple docstring'''
return self.subword_tokenizer.convert_id_to_token(a)
def _lowerCAmelCase ( self: Optional[int] , a: Any) ->str:
'''simple docstring'''
a_ = "".join(a).strip()
return out_string
def _lowerCAmelCase ( self: Any , a: "Conversation") ->List[int]:
'''simple docstring'''
a_ = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(a , add_special_tokens=a) + [self.eos_token_id])
if len(a) > self.model_max_length:
a_ = input_ids[-self.model_max_length :]
return input_ids
def _lowerCAmelCase ( self: int , a: str , a: Optional[str] = None) ->Tuple[str]:
'''simple docstring'''
a_ = 0
if os.path.isdir(a):
a_ = os.path.join(
a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
a_ = os.path.join(
a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"])
else:
a_ = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
)
a_ = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
)
with open(a , "w" , encoding="utf-8") as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
" Please check that the vocabulary is not corrupted!")
a_ = token_index
writer.write(",".join(a) + "\n")
index += 1
with open(a , "w" , encoding="utf-8") as writer:
json.dump(self.emoji , a)
return vocab_file, emoji_file
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
def __init__( self: List[str] , a: Any , a: Union[str, Any] , a: Any) ->List[Any]:
'''simple docstring'''
a_ = vocab # same as swe
a_ = ids_to_tokens # same as bpe
a_ = emoji
a_ = np.max([len(w) for w in self.vocab.keys()])
a_ = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
a_ = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
a_ = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
a_ = re.compile(
r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
a_ = re.compile(
r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
a_ = re.compile(
r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*")
a_ = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
a_ = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
a_ = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})
def __len__( self: Dict) ->Any:
'''simple docstring'''
return len(self.ids_to_tokens)
def _lowerCAmelCase ( self: Union[str, Any] , a: Tuple) ->Any:
'''simple docstring'''
a_ = self.content_repattera.sub("<URL>" , a)
a_ = self.content_repattera.sub("<EMAIL>" , a)
a_ = self.content_repattera.sub("<TEL>" , a)
a_ = self.content_repattera.sub("<DATE>" , a)
a_ = self.content_repattera.sub("<DATE>" , a)
a_ = self.content_repattera.sub("<PRICE>" , a)
a_ = content.translate(self.content_transa)
while "<BLOCK><BLOCK>" in content:
a_ = content.replace("<BLOCK><BLOCK>" , "<BLOCK>")
return content
def _lowerCAmelCase ( self: Any , a: int , a: Optional[int]=False) ->List[str]:
'''simple docstring'''
a_ = text.replace(" " , "<SP>")
a_ = text.replace(" " , "<SP>")
a_ = text.replace("\r\n" , "<BR>")
a_ = text.replace("\n" , "<BR>")
a_ = text.replace("\r" , "<BR>")
a_ = text.replace("\t" , "<TAB>")
a_ = text.replace("—" , "ー")
a_ = text.replace("−" , "ー")
for k, v in self.emoji["emoji"].items():
if k in text:
a_ = text.replace(a , a)
if clean:
a_ = self.clean_text(a)
def check_simbol(a: str):
e = a.encode()
if len(a) == 1 and len(e) == 2:
c = (int(e[0]) << 8) + int(e[1])
if (
(c >= 0XC_2_A_1 and c <= 0XC_2_B_F)
or (c >= 0XC_7_8_0 and c <= 0XC_7_8_3)
or (c >= 0XC_A_B_9 and c <= 0XC_B_B_F)
or (c >= 0XC_C_8_0 and c <= 0XC_D_A_2)
):
return True
return False
def checkuae(a: str):
e = a.encode()
if len(a) == 1 and len(e) == 3:
c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
if c >= 0XE_2_8_0_8_0 and c <= 0XE_2_B_0_7_F:
return True
return False
a_ = 0
a_ = []
while pos < len(a):
a_ = min(len(a) , pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
a_ = [] # (token_id, token, pos)
for e in range(a , a , -1):
a_ = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(a) > 2:
a_ = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e))
if len(a) > 0:
# the smallest token_id is adopted
a_ , a_ , a_ = sorted(a , key=lambda a: x[0])[0]
result.append(a)
a_ = e
else:
a_ = pos + 1
a_ = text[pos:end]
if check_simbol(a):
result.append("<KIGOU>")
elif checkuae(a):
result.append("<U2000U2BFF>")
else:
for i in wd.encode("utf-8"):
result.append("<|byte%d|>" % i)
a_ = end
return result
def _lowerCAmelCase ( self: int , a: List[Any] , a: Any="\n") ->str:
'''simple docstring'''
a_ = []
a_ = []
a_ = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2]))
else:
if len(a) > 0:
words.append(bytearray(a).decode("utf-8" , errors="replace"))
a_ = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["emoji_inv"][word])
elif word == "<SP>":
words.append(" ")
elif word == "<BR>":
words.append(a)
elif word == "<TAB>":
words.append("\t")
elif word == "<BLOCK>":
words.append("▀")
elif word == "<KIGOU>":
words.append("ǀ")
elif word == "<U2000U2BFF>":
words.append("‖")
else:
words.append(a)
if len(a) > 0:
words.append(bytearray(a).decode("utf-8" , errors="replace"))
a_ = "".join(a)
return text
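# Editorial sketch (not from the original file): the "<|byte%d|>" fallback
# round-trips arbitrary UTF-8. For example "猫" encodes to the bytes
# 231, 140, 171, is emitted as <|byte231|><|byte140|><|byte171|>, and is
# reassembled with bytearray(...).decode("utf-8") in the method above.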
| 685 | 1 |
'''simple docstring'''
import cmath
import math
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ) -> complex:
'''simple docstring'''
a_ = math.radians(lowercase__ )
a_ = math.radians(lowercase__ )
# Convert voltage and current to rectangular form
a_ = cmath.rect(lowercase__ ,lowercase__ )
a_ = cmath.rect(lowercase__ ,lowercase__ )
# Calculate apparent power
return voltage_rect * current_rect
if __name__ == "__main__":
import doctest
doctest.testmod()
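# Worked example (assumed inputs): 100 V at 0 degrees and 5 A at -30 degrees
# give an apparent power of roughly (433.01 - 250j) volt-amperes.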
| 685 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
def __init__( self: List[Any] , a: Optional[Any] , a: Dict=13 , a: List[str]=7 , a: Optional[Any]=True , a: int=True , a: Any=True , a: Optional[int]=True , a: int=True , a: Dict=False , a: Union[str, Any]=False , a: Dict=False , a: List[str]=2 , a: Union[str, Any]=99 , a: List[Any]=0 , a: Optional[int]=32 , a: List[str]=5 , a: int=4 , a: List[Any]=0.1 , a: Optional[int]=0.1 , a: Optional[int]=5_12 , a: str=12 , a: Dict=2 , a: Any=0.02 , a: Optional[int]=3 , a: str=4 , a: Optional[int]="last" , a: Tuple=None , a: Any=None , ) ->int:
'''simple docstring'''
a_ = parent
a_ = batch_size
a_ = seq_length
a_ = is_training
a_ = use_input_lengths
a_ = use_token_type_ids
a_ = use_labels
a_ = gelu_activation
a_ = sinusoidal_embeddings
a_ = causal
a_ = asm
a_ = n_langs
a_ = vocab_size
a_ = n_special
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = type_vocab_size
a_ = type_sequence_label_size
a_ = initializer_range
a_ = num_labels
a_ = num_choices
a_ = summary_type
a_ = use_proj
a_ = scope
def _lowerCAmelCase ( self: Tuple) ->Dict:
'''simple docstring'''
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ = random_attention_mask([self.batch_size, self.seq_length])
a_ = None
if self.use_input_lengths:
a_ = (
ids_tensor([self.batch_size] , vocab_size=2) + self.seq_length - 2
) # small variation of seq_length
a_ = None
if self.use_token_type_ids:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs)
a_ = None
a_ = None
a_ = None
if self.use_labels:
a_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a_ = ids_tensor([self.batch_size] , 2).float()
a_ = ids_tensor([self.batch_size] , self.num_choices)
a_ = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _lowerCAmelCase ( self: List[Any]) ->Any:
'''simple docstring'''
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def _lowerCAmelCase ( self: Optional[int] , a: Tuple , a: List[Any] , a: List[Any] , a: Optional[int] , a: int , a: str , a: Any , a: str , a: List[Any] , ) ->Union[str, Any]:
'''simple docstring'''
a_ = FlaubertModel(config=a)
model.to(a)
model.eval()
a_ = model(a , lengths=a , langs=a)
a_ = model(a , langs=a)
a_ = model(a)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _lowerCAmelCase ( self: Optional[int] , a: Optional[Any] , a: Dict , a: Union[str, Any] , a: Dict , a: Optional[Any] , a: Any , a: Tuple , a: str , a: List[str] , ) ->Dict:
'''simple docstring'''
a_ = FlaubertWithLMHeadModel(a)
model.to(a)
model.eval()
a_ = model(a , token_type_ids=a , labels=a)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _lowerCAmelCase ( self: Optional[int] , a: Tuple , a: Optional[Any] , a: List[Any] , a: List[str] , a: List[str] , a: List[str] , a: Optional[Any] , a: str , a: Union[str, Any] , ) ->str:
'''simple docstring'''
a_ = FlaubertForQuestionAnsweringSimple(a)
model.to(a)
model.eval()
a_ = model(a)
a_ = model(a , start_positions=a , end_positions=a)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def _lowerCAmelCase ( self: Union[str, Any] , a: List[str] , a: Tuple , a: Optional[Any] , a: Any , a: Dict , a: Any , a: Optional[int] , a: Optional[Any] , a: Union[str, Any] , ) ->int:
'''simple docstring'''
a_ = FlaubertForQuestionAnswering(a)
model.to(a)
model.eval()
a_ = model(a)
a_ = model(
a , start_positions=a , end_positions=a , cls_index=a , is_impossible=a , p_mask=a , )
a_ = model(
a , start_positions=a , end_positions=a , cls_index=a , is_impossible=a , )
((a_) , ) = result_with_labels.to_tuple()
a_ = model(a , start_positions=a , end_positions=a)
((a_) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , ())
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,))
def _lowerCAmelCase ( self: Union[str, Any] , a: List[str] , a: Tuple , a: Union[str, Any] , a: Any , a: Tuple , a: Union[str, Any] , a: int , a: int , a: Dict , ) ->Union[str, Any]:
'''simple docstring'''
a_ = FlaubertForSequenceClassification(a)
model.to(a)
model.eval()
a_ = model(a)
a_ = model(a , labels=a)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def _lowerCAmelCase ( self: str , a: List[str] , a: Dict , a: Tuple , a: Optional[Any] , a: Any , a: Any , a: str , a: str , a: Optional[Any] , ) ->List[Any]:
'''simple docstring'''
a_ = self.num_labels
a_ = FlaubertForTokenClassification(a)
model.to(a)
model.eval()
a_ = model(a , attention_mask=a , labels=a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def _lowerCAmelCase ( self: Dict , a: Tuple , a: List[Any] , a: Dict , a: Optional[Any] , a: Optional[Any] , a: Optional[Any] , a: Union[str, Any] , a: List[str] , a: Tuple , ) ->Dict:
'''simple docstring'''
a_ = self.num_choices
a_ = FlaubertForMultipleChoice(config=a)
model.to(a)
model.eval()
a_ = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ = model(
a , attention_mask=a , token_type_ids=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def _lowerCAmelCase ( self: Any) ->List[Any]:
'''simple docstring'''
a_ = self.prepare_config_and_inputs()
a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ = config_and_inputs
a_ = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"lengths": input_lengths,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( lowercase_ , lowercase_ , unittest.TestCase ):
_UpperCAmelCase =(
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
_UpperCAmelCase =(
{
'''feature-extraction''': FlaubertModel,
'''fill-mask''': FlaubertWithLMHeadModel,
'''question-answering''': FlaubertForQuestionAnsweringSimple,
'''text-classification''': FlaubertForSequenceClassification,
'''token-classification''': FlaubertForTokenClassification,
'''zero-shot''': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def _lowerCAmelCase ( self: Optional[Any] , a: List[Any] , a: Any , a: List[str] , a: Union[str, Any] , a: int) ->int:
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast")
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _lowerCAmelCase ( self: str , a: Optional[Any] , a: List[Any] , a: Tuple=False) ->List[Any]:
'''simple docstring'''
a_ = super()._prepare_for_class(a , a , return_labels=a)
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
a_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a)
a_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a)
return inputs_dict
def _lowerCAmelCase ( self: Dict) ->Union[str, Any]:
'''simple docstring'''
a_ = FlaubertModelTester(self)
a_ = ConfigTester(self , config_class=a , emb_dim=37)
def _lowerCAmelCase ( self: List[str]) ->Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self: List[str]) ->Optional[Any]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*a)
def _lowerCAmelCase ( self: int) ->Optional[int]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*a)
def _lowerCAmelCase ( self: Optional[int]) ->Optional[Any]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*a)
def _lowerCAmelCase ( self: Any) ->Optional[int]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*a)
def _lowerCAmelCase ( self: Optional[Any]) ->Tuple:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*a)
def _lowerCAmelCase ( self: Optional[Any]) ->Union[str, Any]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*a)
def _lowerCAmelCase ( self: List[Any]) ->Dict:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*a)
@slow
def _lowerCAmelCase ( self: Any) ->Any:
'''simple docstring'''
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = FlaubertModel.from_pretrained(a)
self.assertIsNotNone(a)
@slow
@require_torch_gpu
def _lowerCAmelCase ( self: int) ->Optional[int]:
'''simple docstring'''
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
a_ = True
a_ = model_class(config=a)
a_ = self._prepare_for_class(a , a)
a_ = torch.jit.trace(
a , (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu")))
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a , os.path.join(a , "traced_model.pt"))
a_ = torch.jit.load(os.path.join(a , "traced_model.pt") , map_location=a)
loaded(inputs_dict["input_ids"].to(a) , inputs_dict["attention_mask"].to(a))
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
def _lowerCAmelCase ( self: List[Any]) ->Optional[int]:
'''simple docstring'''
a_ = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
a_ = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]])
with torch.no_grad():
a_ = model(a)[0]
a_ = torch.Size((1, 11, 7_68))
self.assertEqual(output.shape , a)
a_ = torch.tensor(
[[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , a , atol=1e-4))
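# Hypothetical invocation (filename assumed): run this suite with
#   python -m pytest test_modeling_flaubert.py -q
# The @slow cases additionally require RUN_SLOW=1 in the environment.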
| 685 | 1 |