| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 86–54.5k | int64 0–371 | stringlengths 87–49.2k | int64 0–349 | int64 0–1 |
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : List[str] ,_a : int ,_a : str=None ,_a : Optional[Any]=None ,_a : Tuple=None ,_a : Union[str, Any]="resnet50" ,_a : List[str]=3 ,_a : int=32 ,_a : List[Any]=3 ,_a : Tuple=True ,_a : str=True ,):
'''simple docstring'''
_a : Any = parent
_a : List[Any] = out_indices if out_indices is not None else [4]
_a : Union[str, Any] = stage_names
_a : Union[str, Any] = out_features
_a : List[str] = backbone
_a : Dict = batch_size
_a : List[Any] = image_size
_a : Optional[Any] = num_channels
_a : Optional[int] = use_pretrained_backbone
_a : Dict = is_training
def __lowercase ( self : int ):
'''simple docstring'''
_a : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : str = self.get_config()
return config, pixel_values
def __lowercase ( self : List[str] ):
'''simple docstring'''
return TimmBackboneConfig(
image_size=self.image_size ,num_channels=self.num_channels ,out_features=self.out_features ,out_indices=self.out_indices ,stage_names=self.stage_names ,use_pretrained_backbone=self.use_pretrained_backbone ,backbone=self.backbone ,)
def __lowercase ( self : Optional[Any] ,_a : int ,_a : List[str] ):
'''simple docstring'''
_a : List[Any] = TimmBackbone(config=_a )
model.to(_a )
model.eval()
with torch.no_grad():
_a : Tuple = model(_a )
self.parent.assertEqual(
result.feature_map[-1].shape ,(self.batch_size, model.channels[-1], 14, 14) ,)
def __lowercase ( self : str ):
'''simple docstring'''
_a : Optional[Any] = self.prepare_config_and_inputs()
_a, _a : Any = config_and_inputs
_a : Union[str, Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : int = (TimmBackbone,) if is_torch_available() else ()
__UpperCAmelCase : Union[str, Any] = {'''feature-extraction''': TimmBackbone} if is_torch_available() else {}
__UpperCAmelCase : List[str] = False
__UpperCAmelCase : Optional[Any] = False
__UpperCAmelCase : Optional[int] = False
__UpperCAmelCase : Optional[Any] = False
def __lowercase ( self : str ):
'''simple docstring'''
_a : Optional[int] = TimmBackboneModelTester(self )
_a : Optional[Any] = ConfigTester(self ,config_class=_a ,has_text_modality=_a )
def __lowercase ( self : Any ):
'''simple docstring'''
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : Union[str, Any] = 'resnet18'
_a : Dict = 'microsoft/resnet-18'
_a : Optional[int] = AutoBackbone.from_pretrained(_a ,use_timm_backbone=_a )
_a : Optional[int] = AutoBackbone.from_pretrained(_a )
self.assertEqual(len(timm_model.out_features ) ,len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) ,len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels ,transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices ,(-1,) )
self.assertEqual(transformers_model.out_indices ,[len(timm_model.stage_names ) - 1] )
_a : Tuple = AutoBackbone.from_pretrained(_a ,use_timm_backbone=_a ,out_indices=[1, 2, 3] )
_a : List[Any] = AutoBackbone.from_pretrained(_a ,out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices ,transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) ,len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels ,transformers_model.channels )
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking' )
def __lowercase ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute' )
def __lowercase ( self : int ):
'''simple docstring'''
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side' )
def __lowercase ( self : Any ):
'''simple docstring'''
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def __lowercase ( self : int ):
'''simple docstring'''
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def __lowercase ( self : str ):
'''simple docstring'''
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint' )
def __lowercase ( self : str ):
'''simple docstring'''
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __lowercase ( self : Tuple ):
'''simple docstring'''
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def __lowercase ( self : Dict ):
'''simple docstring'''
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.' )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.' )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip('Safetensors is not supported by timm.' )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __lowercase ( self : int ):
'''simple docstring'''
pass
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a, _a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : int = model_class(_a )
_a : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : Optional[int] = [*signature.parameters.keys()]
_a : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_a )
def __lowercase ( self : Dict ):
'''simple docstring'''
_a, _a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
_a : Any = True
_a : Optional[Any] = self.has_attentions
# no need to test all models as different heads yield the same functionality
_a : Any = self.all_model_classes[0]
_a : Union[str, Any] = model_class(_a )
model.to(_a )
_a : Tuple = self._prepare_for_class(_a ,_a )
_a : Any = model(**_a )
_a : Optional[int] = outputs[0][-1]
# Encoder-/Decoder-only models
_a : Optional[int] = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
_a : Tuple = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=_a )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a, _a : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : int = model_class(_a )
model.to(_a )
model.eval()
_a : List[str] = model(**_a )
self.assertEqual(len(result.feature_maps ) ,len(config.out_indices ) )
self.assertEqual(len(model.channels ) ,len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
_a : Union[str, Any] = copy.deepcopy(_a )
_a : str = None
_a : List[Any] = model_class(_a )
model.to(_a )
model.eval()
_a : List[str] = model(**_a )
self.assertEqual(len(result.feature_maps ) ,1 )
self.assertEqual(len(model.channels ) ,1 )
# Check backbone can be initialized with fresh weights
_a : Union[str, Any] = copy.deepcopy(_a )
_a : List[Any] = False
_a : int = model_class(_a )
model.to(_a )
model.eval()
_a : str = model(**_a )
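# Usage sketch for the backbone API exercised by these tests. The checkpoint
# name and input shape mirror the test values above; treat this as an
# illustrative standalone example.
import torch
from transformers import AutoBackbone

backbone = AutoBackbone.from_pretrained('resnet18', use_timm_backbone=True, out_indices=[1, 2, 3])
pixel_values = torch.rand(1, 3, 224, 224)
with torch.no_grad():
    outputs = backbone(pixel_values)
print([fm.shape for fm in outputs.feature_maps])  # one feature map per requested stage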
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

_import_structure = {
    "configuration_squeezebert": [
        "SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SqueezeBertConfig",
        "SqueezeBertOnnxConfig",
    ],
    "tokenization_squeezebert": ["SqueezeBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_squeezebert"] = [
        "SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SqueezeBertForMaskedLM",
        "SqueezeBertForMultipleChoice",
        "SqueezeBertForQuestionAnswering",
        "SqueezeBertForSequenceClassification",
        "SqueezeBertForTokenClassification",
        "SqueezeBertModel",
        "SqueezeBertModule",
        "SqueezeBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_squeezebert import (
        SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SqueezeBertConfig,
        SqueezeBertOnnxConfig,
    )
    from .tokenization_squeezebert import SqueezeBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_squeezebert import (
            SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
            SqueezeBertModel,
            SqueezeBertModule,
            SqueezeBertPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
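# A sketch of what the lazy pattern above buys: importing the package is
# cheap, and each submodule loads only on first attribute access (this
# assumes torch and tokenizers are installed).
from transformers import SqueezeBertConfig, SqueezeBertModel

config = SqueezeBertConfig()      # resolves configuration_squeezebert on first access
model = SqueezeBertModel(config)  # resolves modeling_squeezebert on first access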
'''simple docstring'''
def prefix_function(input_string: str) -> list:
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result

def longest_prefix(input_string: str) -> int:
    return max(prefix_function(input_string))

if __name__ == "__main__":
    import doctest
    doctest.testmod()
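# Usage sketch: the prefix (failure) table supports linear-time substring
# search. The sentinel character and test strings are illustrative choices.
def kmp_search(text: str, pattern: str) -> int:
    table = prefix_function(pattern + '\x00' + text)  # sentinel keeps borders within the pattern
    for i, border in enumerate(table):
        if border == len(pattern):
            return i - 2 * len(pattern)  # start index of the match inside text
    return -1

assert kmp_search('abcabcabd', 'abcabd') == 3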
'''simple docstring'''
import sys

def matrix_chain_order(array):
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]
    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol

def print_optimal_solution(optimal_solution, i, j):
    if i == j:
        print('A' + str(i), end=' ')
    else:
        print('(', end=' ')
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(')', end=' ')

def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)
    print('No. of Operation required: ' + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)

if __name__ == "__main__":
    main()
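# Worked check for the classic CLRS instance used in main() above: the
# minimum multiplication count should be 15125, with parenthesization
# ((A1(A2A3))((A4A5)A6)).
matrix, optimal_solution = matrix_chain_order([30, 35, 15, 5, 10, 20, 25])
assert matrix[1][6] == 15125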
'''simple docstring'''
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging

logging.set_verbosity_info()

def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    chkpt = torch.load(xlm_checkpoint_path, map_location='cpu')
    state_dict = chkpt['model']
    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict['transformer.' + k] = v
    config = chkpt['params']
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}
    vocab = chkpt['dico_word2id']
    vocab = {s + '</w>' if s.find('@@') == -1 and i > 13 else s.replace('@@', ''): i for s, i in vocab.items()}
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['vocab_file']
    print(f"""Save PyTorch model to {pytorch_weights_dump_path}""")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)
    print(f"""Save configuration file to {pytorch_config_dump_path}""")
    with open(pytorch_config_dump_path, 'w', encoding='utf-8') as f:
        f.write(json.dumps(config, indent=2) + '\n')
    print(f"""Save vocab file to {pytorch_vocab_dump_path}""")
    with open(pytorch_vocab_dump_path, 'w', encoding='utf-8') as f:
        f.write(json.dumps(vocab, indent=2) + '\n')

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--xlm_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    args = parser.parse_args()
    convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
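# Invocation sketch (the script file name and checkpoint path below are
# assumptions; the flags match the argparse definition above):
#
#   python convert_xlm_original_pytorch_checkpoint_to_pytorch.py \
#       --xlm_checkpoint_path ./mlm_en_2048.pth \
#       --pytorch_dump_folder_path ./xlm-pytorch
#
# The run writes the weights file, config.json, and the vocab file into the
# dump folder.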
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout

def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / 'foo.lock'))
    lock2 = FileLock(str(tmpdir / 'foo.lock'))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout

def test_long_path(tmpdir):
    filename = 'a' * 1000 + '.lock'
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith('.lock')
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
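# Minimal sketch of the behaviour the tests above assert: a second acquire on
# the same lock file times out while the first is held. The file name is
# illustrative.
from datasets.utils.filelock import FileLock, Timeout

lock = FileLock('demo.lock')
with lock.acquire():
    try:
        FileLock('demo.lock').acquire(timeout=0.01)
    except Timeout:
        print('lock is busy, as expected')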
'''simple docstring'''
from itertools import count

def solution(min_block_length: int = 50) -> int:
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length):
        fill_count_functions.append(1)
        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 1_000_000:
            break
    return n

if __name__ == "__main__":
    print(f'''{solution() = }''')
'''simple docstring'''
def solution(min_total: int = 10**12) -> int:
    prev_numerator = 1
    prev_denominator = 0
    numerator = 1
    denominator = 1
    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator
        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator
    return (denominator + 1) // 2

if __name__ == "__main__":
    print(f'''{solution() = }''')
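# Sanity check for the Pell-style recurrence above: the smallest arrangement
# with P(two blue) = 1/2 is 15 blue discs out of 21 in total, and the solver
# reproduces it for a small bound.
from fractions import Fraction

assert Fraction(15, 21) * Fraction(14, 20) == Fraction(1, 2)
assert solution(min_total=20) == 15  # blue discs in the first arrangement with more than 20 discs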
'''simple docstring'''
from typing import TYPE_CHECKING

from ..utils import _LazyModule

_import_structure = {
    "config": [
        "EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
        "OnnxConfig",
        "OnnxConfigWithPast",
        "OnnxSeq2SeqConfigWithPast",
        "PatchingSpec",
    ],
    "convert": ["export", "validate_model_outputs"],
    "features": ["FeaturesManager"],
    "utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}

if TYPE_CHECKING:
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
    "tokenizer_file": {
        "mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 512}

PRETRAINED_INIT_CONFIGURATION = {}

class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
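# Usage sketch (checkpoint name taken from the pretrained map above):
from transformers import MobileBertTokenizerFast

tokenizer = MobileBertTokenizerFast.from_pretrained('google/mobilebert-uncased')
encoded = tokenizer('hello world', 'second segment')
print(encoded['input_ids'], encoded['token_type_ids'])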
'''simple docstring'''
import requests
from bs4 import BeautifulSoup

def world_covid_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    soup = BeautifulSoup(requests.get(url).text, 'html.parser')
    keys = soup.findAll('h1')
    values = soup.findAll('div', {'class': 'maincounter-number'})
    keys += soup.findAll('span', {'class': 'panel-title'})
    values += soup.findAll('div', {'class': 'number-table-main'})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}

if __name__ == "__main__":
    print("""\033[1m""" + """COVID-19 Status of the World""" + """\033[0m\n""")
    for key, value in world_covid_stats().items():
        print(f'''{key}\n{value}\n''')
'''simple docstring'''
def palindromic_string(input_string: str) -> str:
    max_length = 0
    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ''
    output_string = ''
    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]
    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741
    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]
    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1
        length[j] = 2 * k - 1
        # does this palindrome end after the previously explored end (that is, r)?
        # if yes, update l and r to the bounds of this one
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1
        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j
    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i
    return output_string

if __name__ == "__main__":
    import doctest
    doctest.testmod()
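# Usage sketch (illustrative inputs): the function returns the longest
# palindromic substring in linear time.
assert palindromic_string('abbbaba') == 'abbba'
assert palindromic_string('forgeeksskeegfor') == 'geeksskeeg'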
'''simple docstring'''
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def UpperCAmelCase_ (__a : Any , __a : Tuple ):
"""simple docstring"""
assert isinstance(__a , __a )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def UpperCAmelCase_ (__a : Optional[Any] , __a : Dict , __a : Any ):
"""simple docstring"""
_a : Dict = tmp_path / 'cache'
_a : List[str] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_a : List[Any] = ParquetDatasetReader(__a , cache_dir=__a , keep_in_memory=__a ).read()
_check_parquet_dataset(__a , __a )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def UpperCAmelCase_ (__a : Tuple , __a : Union[str, Any] , __a : List[Any] ):
"""simple docstring"""
_a : Tuple = tmp_path / 'cache'
_a : Optional[int] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
_a : Dict = features.copy() if features else default_expected_features
_a : List[str] = (
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
_a : Optional[Any] = ParquetDatasetReader(__a , features=__a , cache_dir=__a ).read()
_check_parquet_dataset(__a , __a )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def UpperCAmelCase_ (__a : Dict , __a : List[Any] , __a : Any ):
"""simple docstring"""
_a : Any = tmp_path / 'cache'
_a : List[str] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
_a : Dict = ParquetDatasetReader(__a , cache_dir=__a , split=__a ).read()
_check_parquet_dataset(__a , __a )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type' , [str, list] )
def UpperCAmelCase_ (__a : Optional[int] , __a : Union[str, Any] , __a : Optional[Any] ):
"""simple docstring"""
if issubclass(__a , __a ):
_a : Union[str, Any] = parquet_path
elif issubclass(__a , __a ):
_a : Dict = [parquet_path]
_a : List[str] = tmp_path / 'cache'
_a : int = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
_a : Tuple = ParquetDatasetReader(__a , cache_dir=__a ).read()
_check_parquet_dataset(__a , __a )
def UpperCAmelCase_ (__a : List[Any] , __a : Union[str, Any] , __a : Optional[int]=("train",) ):
"""simple docstring"""
assert isinstance(__a , __a )
for split in splits:
_a : int = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def UpperCAmelCase_ (__a : str , __a : Union[str, Any] , __a : Tuple ):
"""simple docstring"""
_a : Any = tmp_path / 'cache'
_a : Optional[Any] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_a : Union[str, Any] = ParquetDatasetReader(
{'train': parquet_path} , cache_dir=__a , keep_in_memory=__a ).read()
_check_parquet_datasetdict(__a , __a )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def UpperCAmelCase_ (__a : List[Any] , __a : Any , __a : Dict ):
"""simple docstring"""
_a : Optional[Any] = tmp_path / 'cache'
_a : Optional[Any] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
_a : List[Any] = features.copy() if features else default_expected_features
_a : int = (
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
_a : List[Any] = ParquetDatasetReader({'train': parquet_path} , features=__a , cache_dir=__a ).read()
_check_parquet_datasetdict(__a , __a )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def UpperCAmelCase_ (__a : Tuple , __a : Tuple , __a : Dict ):
"""simple docstring"""
if split:
_a : Dict = {split: parquet_path}
else:
_a : Optional[int] = 'train'
_a : Dict = {'train': parquet_path, 'test': parquet_path}
_a : str = tmp_path / 'cache'
_a : Any = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
_a : Optional[int] = ParquetDatasetReader(__a , cache_dir=__a ).read()
_check_parquet_datasetdict(__a , __a , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def UpperCAmelCase_ (__a : Optional[Any] , __a : Tuple ):
"""simple docstring"""
_a : int = ParquetDatasetWriter(__a , tmp_path / 'foo.parquet' )
assert writer.write() > 0
_a : Tuple = pq.ParquetFile(tmp_path / 'foo.parquet' )
_a : List[Any] = pf.read()
assert dataset.data.table == output_table
def UpperCAmelCase_ (__a : List[str] , __a : int ):
"""simple docstring"""
_a : Dict = str(shared_datadir / 'test_image_rgb.jpg' )
_a : List[Any] = {'image': [image_path]}
_a : List[Any] = Features({'image': Image()} )
_a : Optional[int] = Dataset.from_dict(__a , features=__a )
_a : List[Any] = ParquetDatasetWriter(__a , tmp_path / 'foo.parquet' )
assert writer.write() > 0
_a : Optional[Any] = Dataset.from_parquet(str(tmp_path / 'foo.parquet' ) )
assert dataset.features == reloaded_dataset.features
_a : str = ParquetDatasetReader(str(tmp_path / 'foo.parquet' ) , streaming=__a ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
'feature, expected' , [
(Features({'foo': Value('int32' )} ), None),
(Features({'image': Image(), 'foo': Value('int32' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({'nested': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def UpperCAmelCase_ (__a : Union[str, Any] , __a : List[Any] ):
"""simple docstring"""
assert get_writer_batch_size(__a ) == expected
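# Round-trip sketch matching the writer/reader pair tested above; the column
# values are illustrative.
from datasets import Dataset

ds = Dataset.from_dict({'col_1': ['a', 'b'], 'col_2': [1, 2]})
ds.to_parquet('demo.parquet')
reloaded = Dataset.from_parquet('demo.parquet')
assert reloaded.column_names == ['col_1', 'col_2']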
'''simple docstring'''
from functools import lru_cache

@lru_cache
def factorial(num: int) -> int:
    if num < 0:
        raise ValueError('Number should not be negative.')
    return 1 if num in (0, 1) else num * factorial(num - 1)

if __name__ == "__main__":
    import doctest
    doctest.testmod()
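# Usage sketch: lru_cache memoizes every computed value, so the recursion
# runs at most once per distinct argument.
assert factorial(5) == 120
assert factorial(0) == 1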
'''simple docstring'''
import math
import qiskit

def quantum_full_adder(input_1: int = 1, input_2: int = 1, carry_in: int = 1):
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError('inputs must be integers.')
    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError('inputs must be positive.')
    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError('inputs must be exact integers.')
    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError('inputs must be less or equal to 2.')
    # build registers
    qr = qiskit.QuantumRegister(4, 'qr')
    cr = qiskit.ClassicalRegister(2, 'cr')
    # list the entries
    entry = [input_1, input_2, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(qr, cr)
    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits
    backend = qiskit.Aer.get_backend('aer_simulator')
    job = qiskit.execute(quantum_circuit, backend, shots=1_000)
    return job.result().get_counts(quantum_circuit)

if __name__ == "__main__":
    print(f'''Total sum count for state is: {quantum_full_adder(1, 1, 1)}''')
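# Behaviour sketch: with classical 0/1 inputs the measurement is
# deterministic, so 1 + 1 + 1 should put all shots on '11' (carry bit, then
# sum bit). An input of 2 takes the Hadamard branch above and yields a mix
# of outcomes. Expected result (assuming qiskit with the Aer provider):
#   quantum_full_adder(1, 1, 1)  ->  {'11': 1000}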
'''simple docstring'''
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
__lowerCAmelCase = threading.Lock()
__lowerCAmelCase = None
__lowerCAmelCase = {
"""debug""": logging.DEBUG,
"""info""": logging.INFO,
"""warning""": logging.WARNING,
"""error""": logging.ERROR,
"""critical""": logging.CRITICAL,
}
__lowerCAmelCase = logging.WARNING
__lowerCAmelCase = True
def UpperCAmelCase_ ():
"""simple docstring"""
_a : Dict = os.getenv('TRANSFORMERS_VERBOSITY' , __a )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
f"""Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, """
f"""has to be one of: { ', '.join(log_levels.keys() ) }""" )
return _default_log_level
def UpperCAmelCase_ ():
"""simple docstring"""
return __name__.split('.' )[0]
def UpperCAmelCase_ ():
"""simple docstring"""
return logging.getLogger(_get_library_name() )
def UpperCAmelCase_ ():
"""simple docstring"""
global _default_handler
with _lock:
if _default_handler:
# This library has already configured the library root logger.
return
_a : str = logging.StreamHandler() # Set sys.stderr as stream.
_a : Optional[Any] = sys.stderr.flush
# Apply our default configuration to the library root logger.
_a : List[Any] = _get_library_root_logger()
library_root_logger.addHandler(_default_handler )
library_root_logger.setLevel(_get_default_logging_level() )
_a : List[str] = False
def UpperCAmelCase_ ():
"""simple docstring"""
global _default_handler
with _lock:
if not _default_handler:
return
_a : int = _get_library_root_logger()
library_root_logger.removeHandler(_default_handler )
library_root_logger.setLevel(logging.NOTSET )
_a : str = None
def UpperCAmelCase_ ():
"""simple docstring"""
return log_levels
def UpperCAmelCase_ (__a : Optional[str] = None ):
"""simple docstring"""
if name is None:
_a : List[Any] = _get_library_name()
_configure_library_root_logger()
return logging.getLogger(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
_configure_library_root_logger()
return _get_library_root_logger().getEffectiveLevel()
def UpperCAmelCase_ (__a : int ):
"""simple docstring"""
_configure_library_root_logger()
_get_library_root_logger().setLevel(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
return set_verbosity(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
return set_verbosity(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
return set_verbosity(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
return set_verbosity(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().removeHandler(_default_handler )
def UpperCAmelCase_ ():
"""simple docstring"""
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().addHandler(_default_handler )
def UpperCAmelCase_ (__a : logging.Handler ):
"""simple docstring"""
_configure_library_root_logger()
assert handler is not None
_get_library_root_logger().addHandler(__a )
def UpperCAmelCase_ (__a : logging.Handler ):
"""simple docstring"""
_configure_library_root_logger()
assert handler is not None and handler not in _get_library_root_logger().handlers
_get_library_root_logger().removeHandler(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
_configure_library_root_logger()
_a : Union[str, Any] = False
def UpperCAmelCase_ ():
"""simple docstring"""
_configure_library_root_logger()
_a : Dict = True
def UpperCAmelCase_ ():
"""simple docstring"""
_a : Any = _get_library_root_logger().handlers
for handler in handlers:
_a : Union[str, Any] = logging.Formatter('[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s' )
handler.setFormatter(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
_a : Union[str, Any] = _get_library_root_logger().handlers
for handler in handlers:
handler.setFormatter(__a )
def UpperCAmelCase_ (self : Union[str, Any] , *__a : Union[str, Any] , **__a : Union[str, Any] ):
"""simple docstring"""
_a : Union[str, Any] = os.getenv('TRANSFORMERS_NO_ADVISORY_WARNINGS' , __a )
if no_advisory_warnings:
return
self.warning(*__a , **__a )
__lowerCAmelCase = warning_advice
@functools.lru_cache(__a )
def UpperCAmelCase_ (self : int , *__a : Optional[Any] , **__a : Any ):
"""simple docstring"""
self.warning(*__a , **__a )
__lowerCAmelCase = warning_once
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : Any ,*_a : Tuple ,**_a : int ): # pylint: disable=unused-argument
'''simple docstring'''
_a : int = args[0] if args else None
def __iter__( self : str ):
'''simple docstring'''
return iter(self._iterator )
def __getattr__( self : List[Any] ,_a : int ):
'''simple docstring'''
def empty_fn(*_a : Optional[Any] ,**_a : Any ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self : List[str] ):
'''simple docstring'''
return self
def __exit__( self : List[str] ,_a : str ,_a : List[Any] ,_a : str ):
'''simple docstring'''
return
class UpperCAmelCase__ :
"""simple docstring"""
def __call__( self : Union[str, Any] ,*_a : Tuple ,**_a : Tuple ):
'''simple docstring'''
if _tqdm_active:
return tqdm_lib.tqdm(*_a ,**_a )
else:
return EmptyTqdm(*_a ,**_a )
def __lowercase ( self : str ,*_a : List[Any] ,**_a : Any ):
'''simple docstring'''
_a : Any = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*_a ,**_a )
def __lowercase ( self : List[str] ):
'''simple docstring'''
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
__lowerCAmelCase = _tqdm_cls()
def UpperCAmelCase_ ():
"""simple docstring"""
global _tqdm_active
return bool(_tqdm_active )
def UpperCAmelCase_ ():
"""simple docstring"""
global _tqdm_active
_a : str = True
hf_hub_utils.enable_progress_bars()
def UpperCAmelCase_ ():
"""simple docstring"""
global _tqdm_active
_a : Dict = False
hf_hub_utils.disable_progress_bars()
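# Usage sketch of the helpers above, as exposed through transformers:
from transformers.utils import logging

logging.set_verbosity_info()
logger = logging.get_logger(__name__)
logger.info('visible at INFO verbosity')
logging.disable_progress_bars()  # flips the tqdm wrapper defined above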
'''simple docstring'''
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase__ ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = OpenAIGPTTokenizer
__UpperCAmelCase : int = OpenAIGPTTokenizerFast
__UpperCAmelCase : Optional[int] = True
__UpperCAmelCase : Any = False
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_a : str = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
_a : int = dict(zip(_a ,range(len(_a ) ) ) )
_a : List[str] = ['#version: 0.2', 'l o', 'lo w', 'e r</w>', '']
_a : List[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
_a : List[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file ,'w' ) as fp:
fp.write(json.dumps(_a ) )
with open(self.merges_file ,'w' ) as fp:
fp.write('\n'.join(_a ) )
def __lowercase ( self : int ,_a : int ):
'''simple docstring'''
return "lower newer", "lower newer"
def __lowercase ( self : Any ):
'''simple docstring'''
_a : str = OpenAIGPTTokenizer(self.vocab_file ,self.merges_file )
_a : Dict = 'lower'
_a : List[str] = ['low', 'er</w>']
_a : Tuple = tokenizer.tokenize(_a )
self.assertListEqual(_a ,_a )
_a : List[str] = tokens + ['<unk>']
_a : Optional[Any] = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) ,_a )
def __lowercase ( self : Union[str, Any] ,_a : Dict=15 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_a : int = self.rust_tokenizer_class.from_pretrained(_a ,**_a )
# Simple input
_a : Dict = 'This is a simple input'
_a : str = ['This is a simple input 1', 'This is a simple input 2']
_a : str = ('This is a simple input', 'This is a pair')
_a : Union[str, Any] = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(_a ,tokenizer_r.encode ,_a ,max_length=_a ,padding='max_length' )
# Simple input
self.assertRaises(_a ,tokenizer_r.encode_plus ,_a ,max_length=_a ,padding='max_length' )
# Simple input
self.assertRaises(
_a ,tokenizer_r.batch_encode_plus ,_a ,max_length=_a ,padding='max_length' ,)
# Pair input
self.assertRaises(_a ,tokenizer_r.encode ,_a ,max_length=_a ,padding='max_length' )
# Pair input
self.assertRaises(_a ,tokenizer_r.encode_plus ,_a ,max_length=_a ,padding='max_length' )
# Pair input
self.assertRaises(
_a ,tokenizer_r.batch_encode_plus ,_a ,max_length=_a ,padding='max_length' ,)
def __lowercase ( self : Any ):
'''simple docstring'''
pass
@require_ftfy
@require_spacy
@require_tokenizers
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
pass
'''simple docstring'''
def cramers_rule_2x2(equation1: list[int], equation2: list[int]):
    if not len(equation1) == len(equation2) == 3:
        raise ValueError('Please enter a valid equation.')
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError('Both a & b of two equations can\'t be zero.')
    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2
    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1
    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError('Infinite solutions. (Consistent system)')
        else:
            raise ValueError('No solution. (Inconsistent system)')
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
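# Worked example: x + y = 3 and 2x - y = 0 intersect at (1, 2).
assert cramers_rule_2x2([1, 1, 3], [2, -1, 0]) == (1.0, 2.0)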
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class UpperCAmelCase__ :
"""simple docstring"""
@staticmethod
def __lowercase ( *_a : Optional[int] ,**_a : Optional[int] ):
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Any = MODEL_FOR_OBJECT_DETECTION_MAPPING
def __lowercase ( self : Dict ,_a : int ,_a : Tuple ,_a : Dict ):
'''simple docstring'''
_a : Dict = ObjectDetectionPipeline(model=_a ,image_processor=_a )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def __lowercase ( self : Union[str, Any] ,_a : List[Any] ,_a : List[Any] ):
'''simple docstring'''
_a : List[str] = object_detector('./tests/fixtures/tests_samples/COCO/000000039769.png' ,threshold=0.0 )
self.assertGreater(len(_a ) ,0 )
for detected_object in outputs:
self.assertEqual(
_a ,{
'score': ANY(_a ),
'label': ANY(_a ),
'box': {'xmin': ANY(_a ), 'ymin': ANY(_a ), 'xmax': ANY(_a ), 'ymax': ANY(_a )},
} ,)
import datasets
_a : Tuple = datasets.load_dataset('hf-internal-testing/fixtures_image_utils' ,'image' ,split='test' )
_a : List[Any] = [
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
]
_a : str = object_detector(_a ,threshold=0.0 )
self.assertEqual(len(_a ) ,len(_a ) )
for outputs in batch_outputs:
self.assertGreater(len(_a ) ,0 )
for detected_object in outputs:
self.assertEqual(
_a ,{
'score': ANY(_a ),
'label': ANY(_a ),
'box': {'xmin': ANY(_a ), 'ymin': ANY(_a ), 'xmax': ANY(_a ), 'ymax': ANY(_a )},
} ,)
@require_tf
@unittest.skip('Object detection not implemented in TF' )
def __lowercase ( self : int ):
'''simple docstring'''
pass
@require_torch
def __lowercase ( self : Any ):
'''simple docstring'''
_a : List[Any] = 'hf-internal-testing/tiny-detr-mobilenetsv3'
_a : int = AutoModelForObjectDetection.from_pretrained(_a )
_a : Optional[Any] = AutoFeatureExtractor.from_pretrained(_a )
_a : int = ObjectDetectionPipeline(model=_a ,feature_extractor=_a )
_a : Dict = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' ,threshold=0.0 )
self.assertEqual(
nested_simplify(_a ,decimals=4 ) ,[
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
] ,)
_a : Dict = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] ,threshold=0.0 ,)
self.assertEqual(
nested_simplify(_a ,decimals=4 ) ,[
[
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],
[
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],
] ,)
@require_torch
@slow
def __lowercase ( self : Any ):
'''simple docstring'''
_a : Any = 'facebook/detr-resnet-50'
_a : List[str] = AutoModelForObjectDetection.from_pretrained(_a )
_a : str = AutoFeatureExtractor.from_pretrained(_a )
_a : Union[str, Any] = ObjectDetectionPipeline(model=_a ,feature_extractor=_a )
_a : int = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' )
self.assertEqual(
nested_simplify(_a ,decimals=4 ) ,[
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
] ,)
_a : str = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
self.assertEqual(
nested_simplify(_a ,decimals=4 ) ,[
[
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
[
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
] ,)
@require_torch
@slow
def __lowercase ( self : Tuple ):
'''simple docstring'''
_a : Tuple = 'facebook/detr-resnet-50'
_a : List[Any] = pipeline('object-detection' ,model=_a )
_a : List[str] = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' )
self.assertEqual(
nested_simplify(_a ,decimals=4 ) ,[
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
] ,)
_a : List[str] = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
self.assertEqual(
nested_simplify(_a ,decimals=4 ) ,[
[
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
[
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
] ,)
@require_torch
@slow
def __lowercase ( self : Tuple ):
'''simple docstring'''
_a : Optional[Any] = 0.9985
_a : List[str] = 'facebook/detr-resnet-50'
_a : List[Any] = pipeline('object-detection' ,model=_a )
_a : Union[str, Any] = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' ,threshold=_a )
self.assertEqual(
nested_simplify(_a ,decimals=4 ) ,[
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
] ,)
@require_torch
@require_pytesseract
@slow
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : Optional[int] = 'Narsil/layoutlmv3-finetuned-funsd'
_a : Tuple = 0.9993
_a : Optional[Any] = pipeline('object-detection' ,model=_a ,threshold=_a )
_a : List[Any] = object_detector(
'https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png' )
self.assertEqual(
nested_simplify(_a ,decimals=4 ) ,[
{'score': 0.9993, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}},
{'score': 0.9993, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}},
] ,)
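# Usage sketch of the pipeline exercised above (the checkpoint and image URL
# are the ones the slow tests use):
from transformers import pipeline

detector = pipeline('object-detection', model='facebook/detr-resnet-50')
for obj in detector('http://images.cocodataset.org/val2017/000000039769.jpg'):
    print(obj['label'], round(obj['score'], 4), obj['box'])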
'''simple docstring'''
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : int ,_a : List[str] ,_a : Optional[Any]=13 ,_a : str=30 ,_a : str=2 ,_a : Union[str, Any]=3 ,_a : Optional[Any]=True ,_a : int=True ,_a : Union[str, Any]=32 ,_a : List[Any]=5 ,_a : Union[str, Any]=4 ,_a : int=37 ,_a : Any="gelu" ,_a : Union[str, Any]=0.1 ,_a : str=0.1 ,_a : List[str]=10 ,_a : Dict=0.02 ,_a : Tuple=None ,):
'''simple docstring'''
_a : Any = parent
_a : int = batch_size
_a : List[Any] = image_size
_a : Optional[int] = patch_size
_a : List[str] = num_channels
_a : Dict = is_training
_a : Dict = use_labels
_a : Optional[Any] = hidden_size
_a : str = num_hidden_layers
_a : Optional[int] = num_attention_heads
_a : Dict = intermediate_size
_a : Union[str, Any] = hidden_act
_a : List[str] = hidden_dropout_prob
_a : Any = attention_probs_dropout_prob
_a : List[str] = type_sequence_label_size
_a : int = initializer_range
_a : List[Any] = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_a : Union[str, Any] = (image_size // patch_size) ** 2
_a : Tuple = num_patches + 1
def __lowercase ( self : Any ):
'''simple docstring'''
_a : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : str = None
if self.use_labels:
_a : Tuple = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_a : List[str] = self.get_config()
return config, pixel_values, labels
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
return ViTMSNConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,initializer_range=self.initializer_range ,)
def __lowercase ( self : Tuple ,_a : Any ,_a : List[Any] ,_a : int ):
'''simple docstring'''
_a : str = ViTMSNModel(config=_a )
model.to(_a )
model.eval()
_a : int = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def __lowercase ( self : List[Any] ,_a : str ,_a : Tuple ,_a : Dict ):
'''simple docstring'''
_a : Tuple = self.type_sequence_label_size
_a : int = ViTMSNForImageClassification(_a )
model.to(_a )
model.eval()
_a : Dict = model(_a ,labels=_a )
print('Pixel and labels shape: {pixel_values.shape}, {labels.shape}' )
print('Labels: {labels}' )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_a : int = 1
_a : Optional[Any] = ViTMSNForImageClassification(_a )
model.to(_a )
model.eval()
_a : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_a : Optional[int] = model(_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def __lowercase ( self : Any ):
'''simple docstring'''
_a : Optional[int] = self.prepare_config_and_inputs()
_a, _a, _a : int = config_and_inputs
_a : List[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Tuple = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
__UpperCAmelCase : List[Any] = (
{'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification}
if is_torch_available()
else {}
)
__UpperCAmelCase : str = False
__UpperCAmelCase : Optional[Any] = False
__UpperCAmelCase : List[str] = False
__UpperCAmelCase : int = False
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : List[str] = ViTMSNModelTester(self )
_a : Optional[int] = ConfigTester(self ,config_class=_a ,has_text_modality=_a ,hidden_size=37 )
def __lowercase ( self : str ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMSN does not use inputs_embeds' )
def __lowercase ( self : List[str] ):
'''simple docstring'''
pass
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a, _a : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : List[Any] = model_class(_a )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
_a : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a ,nn.Linear ) )
def __lowercase ( self : Any ):
'''simple docstring'''
_a, _a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : List[str] = model_class(_a )
_a : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : List[Any] = [*signature.parameters.keys()]
_a : int = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_a )
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def __lowercase ( self : int ):
'''simple docstring'''
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : Dict = ViTMSNModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def UpperCAmelCase_ ():
"""simple docstring"""
_a : List[str] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained('facebook/vit-msn-small' ) if is_vision_available() else None
@slow
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(2 )
_a : List[str] = ViTMSNForImageClassification.from_pretrained('facebook/vit-msn-small' ).to(_a )
_a : List[str] = self.default_image_processor
_a : int = prepare_img()
_a : Tuple = image_processor(images=_a ,return_tensors='pt' ).to(_a )
# forward pass
with torch.no_grad():
_a : Optional[int] = model(**_a )
# verify the logits
_a : Union[str, Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,_a )
_a : List[Any] = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_a ,atol=1E-4 ) )
| 5 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_squeezebert""": [
"""SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SqueezeBertConfig""",
"""SqueezeBertOnnxConfig""",
],
"""tokenization_squeezebert""": ["""SqueezeBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ["""SqueezeBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
"""SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SqueezeBertForMaskedLM""",
"""SqueezeBertForMultipleChoice""",
"""SqueezeBertForQuestionAnswering""",
"""SqueezeBertForSequenceClassification""",
"""SqueezeBertForTokenClassification""",
"""SqueezeBertModel""",
"""SqueezeBertModule""",
"""SqueezeBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 5 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def world_covidaa_stats (__a : str = "https://www.worldometers.info/coronavirus" ):
    """simple docstring"""
    soup = BeautifulSoup(requests.get(__a ).text , 'html.parser' )
    keys = soup.findAll('h1' )
    values = soup.findAll('div' , {'class': 'maincounter-number'} )
    keys += soup.findAll('span' , {'class': 'panel-title'} )
    values += soup.findAll('div' , {'class': 'number-table-main'} )
    return {key.text.strip(): value.text.strip() for key, value in zip(keys , values )}
if __name__ == "__main__":
print("""\033[1m""" + """COVID-19 Status of the World""" + """\033[0m\n""")
for key, value in world_covidaa_stats().items():
print(f'''{key}\n{value}\n''')
| 5 | 1 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main ():
    """simple docstring"""
    parser = ArgumentParser('Accelerate CLI tool' , usage='accelerate <command> [<args>]' , allow_abbrev=False )
    subparsers = parser.add_subparsers(help='accelerate command helpers' )
    # Register commands
    get_config_parser(subparsers=subparsers )
    env_command_parser(subparsers=subparsers )
    launch_command_parser(subparsers=subparsers )
    tpu_command_parser(subparsers=subparsers )
    test_command_parser(subparsers=subparsers )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , 'func' ):
        parser.print_help()
        exit(1 )
    # Run
    args.func(args )
if __name__ == "__main__":
main()
| 5 |
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
__lowerCAmelCase = """docs/source/en/_toctree.yml"""
def clean_model_doc_toc (__a : str ):
    """simple docstring"""
    counts = defaultdict(int )
    for doc in __a:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]
    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc['title'] for doc in __a if doc['local'] == duplicate_key} )
        if len(titles ) > 1:
            raise ValueError(
                f"""{duplicate_key} is present several times in the documentation table of content at """
                '`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
                'others.' )
        # Only add this once
        new_doc.append({'local': duplicate_key, 'title': titles[0]} )
    # Add none duplicate-keys
    new_doc.extend([doc for doc in __a if counts[doc['local']] == 1] )
    # Sort
    return sorted(new_doc , key=lambda s : s["title"].lower() )
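# Illustrative example (not part of the original script): two entries sharing
# {'local': 'model_doc/bert'} with the same title are collapsed into one, and
# the cleaned list comes back sorted case-insensitively by title.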
def check_model_doc (overwrite : Optional[int]=False ):
    """simple docstring"""
    with open(__lowerCAmelCase , encoding='utf-8' ) as f:
        content = yaml.safe_load(f.read() )
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['sections']
    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]['sections']
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc ) if 'sections' in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc['sections']
        new_modality_doc = clean_model_doc_toc(old_modality_doc )
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]['sections'] = new_modality_doc
    if diff:
        if overwrite:
            api_doc[model_idx]['sections'] = model_doc
            content[api_idx]['sections'] = api_doc
            with open(__lowerCAmelCase , 'w' , encoding='utf-8' ) as f:
                f.write(yaml.dump(content , allow_unicode=True ) )
        else:
            raise ValueError(
                'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
    args = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 5 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
__lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
__lowerCAmelCase = """
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"red cat, 4k photo\"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")
>>> pipe.to(\"cuda\")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save(\"cat.png\")
```
"""
def downscale_height_and_width (height : int , width : List[Any] , scale_factor : Optional[Any]=8 ):
    """simple docstring"""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
return new_height * scale_factor, new_width * scale_factor
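# Worked example (illustrative): with the default scale_factor of 8 this maps a
# pixel-space size onto the movq latent grid, rounding up so the latents cover
# the whole image, e.g. downscale_height_and_width(500, 500) == (64, 64), the
# same result as for a 512 x 512 image.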
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
def __init__( self : Tuple ,_a : UNetaDConditionModel ,_a : DDPMScheduler ,_a : VQModel ,):
'''simple docstring'''
super().__init__()
self.register_modules(
unet=_a ,scheduler=_a ,movq=_a ,)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    def __lowercase ( self : Dict ,shape : List[str] ,dtype : Optional[Any] ,device : str ,generator : List[str] ,latents : str ,scheduler : Union[str, Any] ):
        '''simple docstring'''
        if latents is None:
            latents = randn_tensor(shape ,generator=generator ,device=device ,dtype=dtype )
        else:
            if latents.shape != shape:
                raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
            latents = latents.to(device )
        latents = latents * scheduler.init_noise_sigma
        return latents
    def __lowercase ( self : Optional[Any] ,gpu_id : Union[str, Any]=0 ):
        '''simple docstring'''
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('Please install accelerate via `pip install accelerate`' )
        device = torch.device(F"""cuda:{gpu_id}""" )
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model ,device )
    def __lowercase ( self : List[Any] ,gpu_id : int=0 ):
        '''simple docstring'''
        if is_accelerate_available() and is_accelerate_version('>=' ,'0.17.0.dev0' ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
        device = torch.device(F"""cuda:{gpu_id}""" )
        if self.device.type != "cpu":
            self.to('cpu' ,silence_dtype_warnings=True )
            torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model ,device ,prev_module_hook=hook )
        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __lowercase ( self : Dict ):
'''simple docstring'''
if not hasattr(self.unet ,'_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
            hasattr(module ,'_hf_hook' )
and hasattr(module._hf_hook ,'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING )
def __call__( self : List[Any] ,_a : Union[torch.FloatTensor, List[torch.FloatTensor]] ,_a : Union[torch.FloatTensor, List[torch.FloatTensor]] ,_a : int = 512 ,_a : int = 512 ,_a : int = 100 ,_a : float = 4.0 ,_a : int = 1 ,_a : Optional[Union[torch.Generator, List[torch.Generator]]] = None ,_a : Optional[torch.FloatTensor] = None ,_a : Optional[str] = "pil" ,_a : bool = True ,):
'''simple docstring'''
_a : Optional[Any] = self._execution_device
_a : Tuple = guidance_scale > 1.0
if isinstance(_a ,_a ):
_a : Union[str, Any] = torch.cat(_a ,dim=0 )
_a : Optional[int] = image_embeds.shape[0] * num_images_per_prompt
if isinstance(_a ,_a ):
_a : Tuple = torch.cat(_a ,dim=0 )
if do_classifier_free_guidance:
_a : List[str] = image_embeds.repeat_interleave(_a ,dim=0 )
_a : List[str] = negative_image_embeds.repeat_interleave(_a ,dim=0 )
_a : Optional[int] = torch.cat([negative_image_embeds, image_embeds] ,dim=0 ).to(dtype=self.unet.dtype ,device=_a )
self.scheduler.set_timesteps(_a ,device=_a )
_a : int = self.scheduler.timesteps
_a : List[Any] = self.unet.config.in_channels
_a, _a : Optional[int] = downscale_height_and_width(_a ,_a ,self.movq_scale_factor )
# create initial latent
_a : Optional[int] = self.prepare_latents(
(batch_size, num_channels_latents, height, width) ,image_embeds.dtype ,_a ,_a ,_a ,self.scheduler ,)
for i, t in enumerate(self.progress_bar(_a ) ):
# expand the latents if we are doing classifier free guidance
_a : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_a : Union[str, Any] = {'image_embeds': image_embeds}
_a : Any = self.unet(
sample=_a ,timestep=_a ,encoder_hidden_states=_a ,added_cond_kwargs=_a ,return_dict=_a ,)[0]
if do_classifier_free_guidance:
_a, _a : Optional[Any] = noise_pred.split(latents.shape[1] ,dim=1 )
_a, _a : Optional[Any] = noise_pred.chunk(2 )
_a, _a : str = variance_pred.chunk(2 )
_a : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_a : str = torch.cat([noise_pred, variance_pred_text] ,dim=1 )
if not (
hasattr(self.scheduler.config ,'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_a, _a : Dict = noise_pred.split(latents.shape[1] ,dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_a : str = self.scheduler.step(
_a ,_a ,_a ,generator=_a ,)[0]
# post-processing
_a : Tuple = self.movq.decode(_a ,force_not_quantize=_a )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
_a : str = image * 0.5 + 0.5
_a : Union[str, Any] = image.clamp(0 ,1 )
_a : Optional[Any] = image.cpu().permute(0 ,2 ,3 ,1 ).float().numpy()
if output_type == "pil":
_a : str = self.numpy_to_pil(_a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_a )
| 5 |
'''simple docstring'''
from collections.abc import Generator
from math import sin
def to_little_endian (__a : bytes ):
    """simple docstring"""
    if len(__a ) != 3_2:
        raise ValueError('Input must be of length 32' )
    little_endian = b''
    for i in [3, 2, 1, 0]:
        little_endian += __a[8 * i : 8 * i + 8]
    return little_endian
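# e.g. to_little_endian(b'00000000111111112222222233333333') returns
# b'33333333222222221111111100000000': the four 8-bit chunks are reversed.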
def reformat_hex (__a : int ):
    """simple docstring"""
    if __a < 0:
        raise ValueError('Input must be non-negative' )
    hex_rep = format(__a , '08x' )[-8:]
    little_endian_hex = b''
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('utf-8' )
    return little_endian_hex
def preprocess (__a : bytes ):
    """simple docstring"""
    bit_string = b''
    for char in __a:
        bit_string += format(char , '08b' ).encode('utf-8' )
    start_len = format(len(bit_string ) , '064b' ).encode('utf-8' )
    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string ) % 5_1_2 != 4_4_8:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[3_2:] ) + to_little_endian(start_len[:3_2] )
    return bit_string
def get_block_words (__a : bytes ):
    """simple docstring"""
    if len(__a ) % 5_1_2 != 0:
        raise ValueError('Input must have length that\'s a multiple of 512' )
    for pos in range(0 , len(__a ) , 5_1_2 ):
        block = __a[pos : pos + 5_1_2]
        block_words = []
        for i in range(0 , 5_1_2 , 3_2 ):
            block_words.append(int(to_little_endian(block[i : i + 3_2] ) , 2 ) )
        yield block_words
def not_aa (__a : int ):
    """simple docstring"""
    if __a < 0:
        raise ValueError('Input must be non-negative' )
    i_str = format(__a , '032b' )
    new_str = ''
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str , 2 )
def sum_aa (__a : int , __b : int ):
    """simple docstring"""
    return (__a + __b) % 2**3_2
def left_rotate_aa (__a : int , __b : int ):
    """simple docstring"""
    if __a < 0:
        raise ValueError('Input must be non-negative' )
    if __b < 0:
        raise ValueError('Shift must be non-negative' )
    return ((__a << __b) ^ (__a >> (3_2 - __b))) % 2**3_2
def UpperCAmelCase_ (__a : bytes ):
    """simple docstring"""
    bit_string = preprocess(__a )
    added_consts = [int(2**3_2 * abs(sin(i + 1 ) ) ) for i in range(6_4 )]
    # Starting states
    aa = 0x67_45_23_01
    ba = 0xEF_CD_AB_89
    ca = 0x98_BA_DC_FE
    da = 0x10_32_54_76
    shift_amounts = [
7,
1_2,
1_7,
2_2,
7,
1_2,
1_7,
2_2,
7,
1_2,
1_7,
2_2,
7,
1_2,
1_7,
2_2,
5,
9,
1_4,
2_0,
5,
9,
1_4,
2_0,
5,
9,
1_4,
2_0,
5,
9,
1_4,
2_0,
4,
1_1,
1_6,
2_3,
4,
1_1,
1_6,
2_3,
4,
1_1,
1_6,
2_3,
4,
1_1,
1_6,
2_3,
6,
1_0,
1_5,
2_1,
6,
1_0,
1_5,
2_1,
6,
1_0,
1_5,
2_1,
6,
1_0,
1_5,
2_1,
]
# Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string ):
        a = aa
        b = ba
        c = ca
        d = da
        # Hash current chunk
        for i in range(6_4 ):
            if i <= 1_5:
                # f = (b & c) | (not_32(b) & d) # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 3_1:
                # f = (d & b) | (not_32(d) & c) # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 1_6
            elif i <= 4_7:
                f = b ^ c ^ d
                g = (3 * i + 5) % 1_6
            else:
                f = c ^ (b | not_aa(d ))
                g = (7 * i) % 1_6
            f = (f + a + added_consts[i] + block_words[g]) % 2**3_2
            a = d
            d = c
            c = b
            b = sum_aa(b , left_rotate_aa(f , shift_amounts[i] ) )
        # Add hashed chunk to running total
        aa = sum_aa(aa , a )
        ba = sum_aa(ba , b )
        ca = sum_aa(ca , c )
        da = sum_aa(da , d )
    digest = reformat_hex(aa ) + reformat_hex(ba ) + reformat_hex(ca ) + reformat_hex(da )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
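    # Sanity check (illustrative, not in the original file): the MD5 digest of
    # the empty message is the well-known d41d8cd98f00b204e9800998ecf8427e.
    assert UpperCAmelCase_(b'' ) == b'd41d8cd98f00b204e9800998ecf8427e'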
| 5 | 1 |
'''simple docstring'''
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
__lowerCAmelCase = open # noqa: we just need to have a builtin inside this module to test it properly
| 5 |
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar (num_diffusion_timesteps : str , max_beta : Dict=0.999 , alpha_transform_type : List[str]="cosine" , ):
    """simple docstring"""
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t : Union[str, Any] ):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t : int ):
            return math.exp(t * -12.0 )
    else:
        raise ValueError(F"""Unsupported alpha_transform_type: {alpha_transform_type}""" )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) , max_beta ) )
    return torch.tensor(betas , dtype=torch.floataa )
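# Illustrative note: each beta_t above is 1 - alpha_bar(t2) / alpha_bar(t1) over
# consecutive fractions of the schedule, clipped at max_beta, following the
# "Glide cosine" (squaredcos_cap_v2) construction used further below.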
class UpperCAmelCase__ ( lowercase__ , lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = [e.name for e in KarrasDiffusionSchedulers]
__UpperCAmelCase : Dict = 2
@register_to_config
def __init__( self : str ,_a : int = 1000 ,_a : float = 0.0_0085 ,_a : float = 0.012 ,_a : str = "linear" ,_a : Optional[Union[np.ndarray, List[float]]] = None ,_a : str = "epsilon" ,_a : Optional[bool] = False ,_a : Optional[bool] = False ,_a : float = 1.0 ,_a : str = "linspace" ,_a : int = 0 ,):
'''simple docstring'''
if trained_betas is not None:
_a : List[str] = torch.tensor(_a ,dtype=torch.floataa )
elif beta_schedule == "linear":
_a : Tuple = torch.linspace(_a ,_a ,_a ,dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_a : List[str] = (
torch.linspace(beta_start**0.5 ,beta_end**0.5 ,_a ,dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_a : Dict = betas_for_alpha_bar(_a ,alpha_transform_type='cosine' )
elif beta_schedule == "exp":
_a : Tuple = betas_for_alpha_bar(_a ,alpha_transform_type='exp' )
else:
            raise NotImplementedError(F"""{beta_schedule} is not implemented for {self.__class__}""" )
_a : Optional[Any] = 1.0 - self.betas
_a : Optional[int] = torch.cumprod(self.alphas ,dim=0 )
# set all values
self.set_timesteps(_a ,_a ,_a )
_a : Optional[int] = use_karras_sigmas
    def __lowercase ( self : Any ,timestep : Union[str, Any] ,schedule_timesteps : Optional[Any]=None ):
        '''simple docstring'''
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps
        indices = (schedule_timesteps == timestep).nonzero()
        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter ) == 0:
            pos = 1 if len(indices ) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep ) else timestep
            pos = self._index_counter[timestep_int]
        return indices[pos].item()
@property
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
    def __lowercase ( self : int ,sample : torch.FloatTensor ,timestep : Union[float, torch.FloatTensor] ,):
        '''simple docstring'''
        step_index = self.index_for_timestep(timestep )
        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def __lowercase ( self : Any ,num_inference_steps : int ,device : Union[str, torch.device] = None ,num_train_timesteps : Optional[int] = None ,):
        '''simple docstring'''
        self.num_inference_steps = num_inference_steps
        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps
        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0 ,num_train_timesteps - 1 ,num_inference_steps ,dtype=float )[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0 ,num_inference_steps ) * step_ratio).round()[::-1].copy().astype(float )
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps ,0 ,-step_ratio )).round().copy().astype(float )
            timesteps -= 1
        else:
            raise ValueError(
                F"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" )
        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
        log_sigmas = np.log(sigmas )
        sigmas = np.interp(timesteps ,np.arange(0 ,len(sigmas ) ) ,sigmas )
        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas ,num_inference_steps=self.num_inference_steps )
            timesteps = np.array([self._sigma_to_t(sigma ,log_sigmas ) for sigma in sigmas] )
        sigmas = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
        sigmas = torch.from_numpy(sigmas ).to(device=device )
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
        timesteps = torch.from_numpy(timesteps )
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
        if str(device ).startswith('mps' ):
            # mps does not support float64
            self.timesteps = timesteps.to(device ,dtype=torch.floataa )
        else:
            self.timesteps = timesteps.to(device=device )
        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None
        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int )
    def __lowercase ( self : str ,sigma : Dict ,log_sigmas : Dict ):
        '''simple docstring'''
        log_sigma = np.log(sigma )
        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]
        # get sigmas range
        low_idx = np.cumsum((dists >= 0) ,axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
        high_idx = low_idx + 1
        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]
        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w ,0 ,1 )
        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape )
        return t
    def __lowercase ( self : int ,in_sigmas : torch.FloatTensor ,num_inference_steps : Tuple ):
        '''simple docstring'''
        sigma_min : float = in_sigmas[-1].item()
        sigma_max : float = in_sigmas[0].item()
        rho = 7.0 # 7.0 is the value used in the paper
        ramp = np.linspace(0 ,1 ,num_inference_steps )
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas
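    # The Karras et al. (2022) spacing above interpolates sigma**(1/rho) linearly
    # between the largest and smallest sigma and raises the result back to the
    # rho-th power, concentrating steps where sigma changes fastest.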
@property
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
return self.dt is None
    def __lowercase ( self : int ,model_output : Union[torch.FloatTensor, np.ndarray] ,timestep : Union[float, torch.FloatTensor] ,sample : Union[torch.FloatTensor, np.ndarray] ,return_dict : bool = True ,):
        '''simple docstring'''
        step_index = self.index_for_timestep(timestep )
        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep ) else timestep
        self._index_counter[timestep_int] += 1
        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]
        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" )
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range ,self.config.clip_sample_range )
        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat
            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2
            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample
            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None
        prev_sample = sample + derivative * dt
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample )
    def __lowercase ( self : Optional[int] ,original_samples : torch.FloatTensor ,noise : torch.FloatTensor ,timesteps : torch.FloatTensor ,):
        '''simple docstring'''
        sigmas = self.sigmas.to(device=original_samples.device ,dtype=original_samples.dtype )
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps ):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device ,dtype=torch.floataa )
            timesteps = timesteps.to(original_samples.device ,dtype=torch.floataa )
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device )
            timesteps = timesteps.to(original_samples.device )
        step_indices = [self.index_for_timestep(t ,schedule_timesteps ) for t in timesteps]
        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape ) < len(original_samples.shape ):
            sigma = sigma.unsqueeze(-1 )
        noisy_samples = original_samples + noise * sigma
        return noisy_samples
def __len__( self : Optional[int] ):
'''simple docstring'''
return self.config.num_train_timesteps
| 5 | 1 |
'''simple docstring'''
import argparse
import json
from tqdm import tqdm
def main ():
    """simple docstring"""
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--src_path' , type=str , default='biencoder-nq-dev.json' , help='Path to raw DPR training data' , )
    parser.add_argument(
        '--evaluation_set' , type=str , help='where to store parsed evaluation_set file' , )
    parser.add_argument(
        '--gold_data_path' , type=str , help='where to store parsed gold_data_path file' , )
    args = parser.parse_args()
    with open(args.src_path , 'r' ) as src_file, open(args.evaluation_set , 'w' ) as eval_file, open(
        args.gold_data_path , 'w' ) as gold_file:
        dpr_records = json.load(src_file )
        for dpr_record in tqdm(dpr_records ):
            question = dpr_record['question']
            titles = [context['title'] for context in dpr_record['positive_ctxs']]
            eval_file.write(question + '\n' )
            gold_file.write('\t'.join(titles ) + '\n' )
if __name__ == "__main__":
main()
| 5 |
'''simple docstring'''
import qiskit
def single_qubit_measure (__a : int , __b : int ):
    """simple docstring"""
    simulator = qiskit.Aer.get_backend('aer_simulator' )
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(__a , __b )
    # Map the quantum measurement to the classical bits
    circuit.measure([0] , [0] )
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit , simulator , shots=1_0_0_0 )
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit )
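# With no gates applied, the qubit stays in |0>, so all shots collapse to '0'
# and the returned counts for single_qubit_measure(1, 1) look like {'0': 1000}.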
if __name__ == "__main__":
print(f'''Total count for various states are: {single_qubit_measure(1, 1)}''')
| 5 | 1 |
'''simple docstring'''
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : Dict ,_a : Dict ,_a : Dict=13 ,_a : int=7 ,_a : Any=True ,_a : Dict=True ,_a : int=False ,_a : Optional[Any]=True ,_a : Optional[int]=99 ,_a : Union[str, Any]=64 ,_a : Dict=5 ,_a : List[str]=4 ,_a : Union[str, Any]=64 ,_a : Any="gelu" ,_a : Dict=0.1 ,_a : int=0.1 ,_a : Optional[int]=512 ,_a : List[str]=16 ,_a : int=2 ,_a : Optional[Any]=0.02 ,_a : int=3 ,_a : Optional[Any]=4 ,_a : Dict=None ,):
'''simple docstring'''
_a : List[str] = parent
_a : Any = batch_size
_a : str = seq_length
_a : Dict = is_training
_a : int = use_input_mask
_a : str = use_token_type_ids
_a : Dict = use_labels
_a : Optional[int] = vocab_size
_a : Tuple = hidden_size
_a : str = num_hidden_layers
_a : List[Any] = num_attention_heads
_a : Any = intermediate_size
_a : Optional[Any] = hidden_act
_a : Tuple = hidden_dropout_prob
_a : Tuple = attention_probs_dropout_prob
_a : Optional[Any] = max_position_embeddings
_a : int = type_vocab_size
_a : Dict = type_sequence_label_size
_a : Tuple = initializer_range
_a : Optional[Any] = num_labels
_a : List[str] = num_choices
_a : Dict = scope
def __lowercase ( self : List[Any] ):
'''simple docstring'''
return MPNetConfig.from_pretrained('microsoft/mpnet-base' )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
_a : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
_a : List[str] = None
if self.use_input_mask:
_a : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
_a : Tuple = None
_a : List[Any] = None
_a : int = None
if self.use_labels:
_a : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_a : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
_a : List[str] = ids_tensor([self.batch_size] ,self.num_choices )
_a : List[str] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowercase ( self : str ):
'''simple docstring'''
return MPNetConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,)
def __lowercase ( self : Optional[Any] ,_a : Dict ,_a : Dict ,_a : str ,_a : Dict ,_a : Any ,_a : int ):
'''simple docstring'''
_a : List[str] = MPNetModel(config=_a )
model.to(_a )
model.eval()
_a : Optional[Any] = model(_a ,_a )
_a : Optional[Any] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def __lowercase ( self : List[Any] ,_a : Optional[Any] ,_a : List[Any] ,_a : List[Any] ,_a : Tuple ,_a : Optional[Any] ,_a : List[Any] ):
'''simple docstring'''
_a : List[Any] = MPNetForQuestionAnswering(config=_a )
model.to(_a )
model.eval()
_a : Tuple = model(
_a ,attention_mask=_a ,start_positions=_a ,end_positions=_a ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def __lowercase ( self : List[Any] ,_a : Union[str, Any] ,_a : Optional[Any] ,_a : Union[str, Any] ,_a : Any ,_a : Optional[int] ,_a : Tuple ):
'''simple docstring'''
_a : int = self.num_labels
_a : Union[str, Any] = MPNetForSequenceClassification(_a )
model.to(_a )
model.eval()
_a : Optional[int] = model(_a ,attention_mask=_a ,labels=_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def __lowercase ( self : Dict ,_a : Any ,_a : Optional[int] ,_a : Tuple ,_a : Union[str, Any] ,_a : Union[str, Any] ,_a : List[str] ):
'''simple docstring'''
_a : Union[str, Any] = self.num_choices
_a : Any = MPNetForMultipleChoice(config=_a )
model.to(_a )
model.eval()
_a : Tuple = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
_a : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
_a : List[str] = model(
_a ,attention_mask=_a ,labels=_a ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def __lowercase ( self : Any ,_a : Optional[int] ,_a : Optional[int] ,_a : List[str] ,_a : List[Any] ,_a : Union[str, Any] ,_a : Union[str, Any] ):
'''simple docstring'''
_a : str = self.num_labels
_a : Optional[int] = MPNetForTokenClassification(config=_a )
model.to(_a )
model.eval()
_a : Tuple = model(_a ,attention_mask=_a ,labels=_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def __lowercase ( self : Any ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Tuple = (
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
__UpperCAmelCase : Dict = (
{
'''feature-extraction''': MPNetModel,
'''fill-mask''': MPNetForMaskedLM,
'''question-answering''': MPNetForQuestionAnswering,
'''text-classification''': MPNetForSequenceClassification,
'''token-classification''': MPNetForTokenClassification,
'''zero-shot''': MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCAmelCase : Dict = False
__UpperCAmelCase : Any = True
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : int = MPNetModelTester(self )
_a : List[Any] = ConfigTester(self ,config_class=_a ,hidden_size=37 )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowercase ( self : Tuple ):
'''simple docstring'''
_a : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_model(*_a )
def __lowercase ( self : str ):
'''simple docstring'''
_a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_sequence_classification(*_a )
def __lowercase ( self : str ):
'''simple docstring'''
_a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_multiple_choice(*_a )
def __lowercase ( self : int ):
'''simple docstring'''
_a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_token_classification(*_a )
def __lowercase ( self : Any ):
'''simple docstring'''
_a : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_question_answering(*_a )
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __lowercase ( self : int ):
'''simple docstring'''
_a : Optional[int] = MPNetModel.from_pretrained('microsoft/mpnet-base' )
_a : str = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
_a : Optional[int] = model(_a )[0]
_a : Optional[int] = torch.Size((1, 11, 768) )
self.assertEqual(output.shape ,_a )
_a : Tuple = torch.tensor(
[[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]] )
# compare the actual values for a slice.
self.assertTrue(torch.allclose(output[:, :3, :3] ,_a ,atol=1E-4 ) )
| 5 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
    def __lowercase ( self : str ):
        '''simple docstring'''
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png' )
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
        model_id = 'xvjiarui/stable-diffusion-2-inpainting'
        pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id ,safety_checker=None )
        prompt = 'Face of a yellow cat, high resolution, sitting on a park bench'
        prng_seed = jax.random.PRNGKey(0 )
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        init_image = num_samples * [init_image]
        mask_image = num_samples * [mask_image]
        prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(prompt ,init_image ,mask_image )
        # shard inputs and rng
        params = replicate(params )
        prng_seed = jax.random.split(prng_seed ,jax.device_count() )
        prompt_ids = shard(prompt_ids )
        processed_masked_images = shard(processed_masked_images )
        processed_masks = shard(processed_masks )
        output = pipeline(
            prompt_ids ,processed_masked_images ,processed_masks ,params ,prng_seed ,num_inference_steps ,jit=True )
        images = output.images.reshape(num_samples ,512 ,512 ,3 )
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        expected_slice = jnp.array(
            [0.361_1307, 0.3764_9736, 0.375_7408, 0.3821_3953, 0.3929_5167, 0.384_1631, 0.4155_4978, 0.413_7475, 0.421_7084] )
        print(F"""output_slice: {output_slice}""" )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 5 | 1 |
'''simple docstring'''
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
"""iou_prediction_head.layers.0""": """iou_prediction_head.proj_in""",
"""iou_prediction_head.layers.1""": """iou_prediction_head.layers.0""",
"""iou_prediction_head.layers.2""": """iou_prediction_head.proj_out""",
"""mask_decoder.output_upscaling.0""": """mask_decoder.upscale_conv1""",
"""mask_decoder.output_upscaling.1""": """mask_decoder.upscale_layer_norm""",
"""mask_decoder.output_upscaling.3""": """mask_decoder.upscale_conv2""",
"""mask_downscaling.0""": """mask_embed.conv1""",
"""mask_downscaling.1""": """mask_embed.layer_norm1""",
"""mask_downscaling.3""": """mask_embed.conv2""",
"""mask_downscaling.4""": """mask_embed.layer_norm2""",
"""mask_downscaling.6""": """mask_embed.conv3""",
"""point_embeddings""": """point_embed""",
"""pe_layer.positional_encoding_gaussian_matrix""": """shared_embedding.positional_embedding""",
"""image_encoder""": """vision_encoder""",
"""neck.0""": """neck.conv1""",
"""neck.1""": """neck.layer_norm1""",
"""neck.2""": """neck.conv2""",
"""neck.3""": """neck.layer_norm2""",
"""patch_embed.proj""": """patch_embed.projection""",
""".norm""": """.layer_norm""",
"""blocks""": """layers""",
}
def replace_keys (state_dict : Union[str, Any] ):
    """simple docstring"""
    model_state_dict = {}
    state_dict.pop('pixel_mean' , None )
    state_dict.pop('pixel_std' , None )
    output_hypernetworks_mlps_pattern = R'.*.output_hypernetworks_mlps.(\d+).layers.(\d+).*'
    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify , new_key )
        if re.match(output_hypernetworks_mlps_pattern , key ):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern , key ).group(2 ) )
            if layer_nb == 0:
                key = key.replace('layers.0' , 'proj_in' )
            elif layer_nb == 1:
                key = key.replace('layers.1' , 'layers.0' )
            elif layer_nb == 2:
                key = key.replace('layers.2' , 'proj_out' )
        model_state_dict[key] = value
    model_state_dict['shared_image_embedding.positional_embedding'] = model_state_dict[
        'prompt_encoder.shared_embedding.positional_embedding'
    ]
    return model_state_dict
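# Illustrative example of the renaming above: a checkpoint key such as
# 'image_encoder.blocks.0.norm1.weight' becomes
# 'vision_encoder.layers.0.layer_norm1.weight' after the mapping is applied.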
def convert_sam_checkpoint (model_name : int , pytorch_dump_folder : Optional[int] , push_to_hub : List[Any] , model_hub_id : Union[str, Any]="ybelkada/segment-anything" ):
    """simple docstring"""
    # pytorch_dump_folder and push_to_hub are accepted for CLI compatibility but unused in this excerpt.
    checkpoint_path = hf_hub_download(model_hub_id , f"""checkpoints/{model_name}.pth""" )
    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1_0_2_4 , num_hidden_layers=2_4 , num_attention_heads=1_6 , global_attn_indexes=[5, 1_1, 1_7, 2_3] , )
        config = SamConfig(
            vision_config=vision_config , )
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1_2_8_0 , num_hidden_layers=3_2 , num_attention_heads=1_6 , global_attn_indexes=[7, 1_5, 2_3, 3_1] , )
        config = SamConfig(
            vision_config=vision_config , )
    state_dict = torch.load(checkpoint_path , map_location='cpu' )
    state_dict = replace_keys(state_dict )
    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor )
    hf_model = SamModel(config )
    hf_model.load_state_dict(state_dict )
    hf_model = hf_model.to('cuda' )
    img_url = 'https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png'
    raw_image = Image.open(requests.get(img_url , stream=True ).raw ).convert('RGB' )
    input_points = [[[4_0_0, 6_5_0]]]
    input_labels = [[1]]
    inputs = processor(images=np.array(raw_image ) , return_tensors='pt' ).to('cuda' )
    with torch.no_grad():
        output = hf_model(**inputs )
    scores = output.iou_scores.squeeze()
    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579890251159668
        inputs = processor(
            images=np.array(raw_image ) , input_points=input_points , input_labels=input_labels , return_tensors='pt' ).to('cuda' )
        with torch.no_grad():
            output = hf_model(**inputs )
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9712603092193604
        input_boxes = ((7_5, 2_7_5, 1_7_2_5, 8_5_0),)
        inputs = processor(images=np.array(raw_image ) , input_boxes=input_boxes , return_tensors='pt' ).to('cuda' )
        with torch.no_grad():
            output = hf_model(**inputs )
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.8686015605926514
        # Test with 2 points and 1 image.
        input_points = [[[4_0_0, 6_5_0], [8_0_0, 6_5_0]]]
        input_labels = [[1, 1]]
        inputs = processor(
            images=np.array(raw_image ) , input_points=input_points , input_labels=input_labels , return_tensors='pt' ).to('cuda' )
        with torch.no_grad():
            output = hf_model(**inputs )
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9936047792434692
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ["""sam_vit_b_01ec64""", """sam_vit_h_4b8939""", """sam_vit_l_0b3195"""]
parser.add_argument(
"""--model_name""",
default="""sam_vit_h_4b8939""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
parser.add_argument(
"""--model_hub_id""",
default="""ybelkada/segment-anything""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
    args = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 5 |
'''simple docstring'''
def match_pattern (input_string : str , pattern : str ):
    """simple docstring"""
    len_string = len(input_string ) + 1
    len_pattern = len(pattern ) + 1
    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern )] for j in range(len_string )]
    # since string of zero length match pattern of zero length
    dp[0][0] = 1
    # since pattern of zero length will never match with string of non-zero length
    for i in range(1 , len_string ):
        dp[i][0] = 0
    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1 , len_pattern ):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == '*' else 0
    # now using bottom-up approach to find for all remaining lengths
    for i in range(1 , len_string ):
        for j in range(1 , len_pattern ):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0
    return bool(dp[-1][-1] )
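# Worked example: match_pattern("aab", "c*a*b") is True, since "c*" can match
# zero characters, "a*" consumes both a's, and "b" matches the final letter.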
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
__lowerCAmelCase = """aab"""
__lowerCAmelCase = """c*a*b"""
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(f'''{input_string} matches the given pattern {pattern}''')
else:
print(f'''{input_string} does not match with the given pattern {pattern}''')
| 5 | 1 |
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def UpperCAmelCase_ (ode_func : Callable , ya : float , xa : float , step_size : float , x_end : float ):
    """simple docstring"""
    n = int(np.ceil((x_end - xa) / step_size ) )
    y = np.zeros((n + 1,) )
    y[0] = ya
    x = xa
    for k in range(n ):
        y_get = y[k] + step_size * ode_func(x , y[k] )
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x , y[k] ) + ode_func(x + step_size , y_get ))
        )
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
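    # Illustrative usage (not from the original module): integrate y' = y from
    # x = 0 to x = 1 with step 0.01 using the predictor-corrector above; the
    # final entry approximates e ~ 2.71828 with second-order accuracy.
    _ys = UpperCAmelCase_(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
    print(_ys[-1])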
| 5 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase__ ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Dict = BlenderbotSmallTokenizer
__UpperCAmelCase : Tuple = False
def __lowercase ( self : List[Any] ):
'''simple docstring'''
super().setUp()
        vocab = ['__start__', 'adapt', 'act', 'ap@@', 'te', '__end__', '__unk__']
        vocab_tokens = dict(zip(vocab ,range(len(vocab ) ) ) )
        merges = ['#version: 0.2', 'a p', 't e</w>', 'ap t</w>', 'a d', 'ad apt</w>', 'a c', 'ac t</w>', '']
        self.special_tokens_map = {'unk_token': '__unk__', 'bos_token': '__start__', 'eos_token': '__end__'}
        self.vocab_file = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '\n' )
        with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
            fp.write('\n'.join(merges ) )
def __lowercase ( self : List[Any] ,**_a : int ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname ,**_a )
def __lowercase ( self : Tuple ,_a : int ):
'''simple docstring'''
_a : Optional[Any] = 'adapt act apte'
_a : Dict = 'adapt act apte'
return input_text, output_text
def __lowercase ( self : int ):
'''simple docstring'''
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
        text = 'adapt act apte'
        bpe_tokens = ['adapt', 'act', 'ap@@', 'te']
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens ,bpe_tokens )
        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) ,input_bpe_tokens )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
        tok = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
        assert tok('sam' ).input_ids == [1384]
        src_text = 'I am a small frog.'
        encoded = tok([src_text] ,padding=False ,truncation=True )['input_ids']
        decoded = tok.batch_decode(encoded ,skip_special_tokens=True ,clean_up_tokenization_spaces=True )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def __lowercase ( self : Any ):
'''simple docstring'''
        tok = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
        src_text = 'I am a small frog .'
        src_text_dot = '.'
        encoded = tok(src_text )['input_ids']
        encoded_dot = tok(src_text_dot )['input_ids']
assert encoded[-1] == encoded_dot[0]
| 5 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_mobilebert""": [
"""MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""MobileBertConfig""",
"""MobileBertOnnxConfig""",
],
"""tokenization_mobilebert""": ["""MobileBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ["""MobileBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilebert"] = [
"""MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileBertForMaskedLM""",
"""MobileBertForMultipleChoice""",
"""MobileBertForNextSentencePrediction""",
"""MobileBertForPreTraining""",
"""MobileBertForQuestionAnswering""",
"""MobileBertForSequenceClassification""",
"""MobileBertForTokenClassification""",
"""MobileBertLayer""",
"""MobileBertModel""",
"""MobileBertPreTrainedModel""",
"""load_tf_weights_in_mobilebert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
"""TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFMobileBertForMaskedLM""",
"""TFMobileBertForMultipleChoice""",
"""TFMobileBertForNextSentencePrediction""",
"""TFMobileBertForPreTraining""",
"""TFMobileBertForQuestionAnswering""",
"""TFMobileBertForSequenceClassification""",
"""TFMobileBertForTokenClassification""",
"""TFMobileBertMainLayer""",
"""TFMobileBertModel""",
"""TFMobileBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 5 |
'''simple docstring'''
# fmt: off
MORSE_CODE_DICT = {
"""A""": """.-""", """B""": """-...""", """C""": """-.-.""", """D""": """-..""", """E""": """.""", """F""": """..-.""", """G""": """--.""",
"""H""": """....""", """I""": """..""", """J""": """.---""", """K""": """-.-""", """L""": """.-..""", """M""": """--""", """N""": """-.""",
"""O""": """---""", """P""": """.--.""", """Q""": """--.-""", """R""": """.-.""", """S""": """...""", """T""": """-""", """U""": """..-""",
"""V""": """...-""", """W""": """.--""", """X""": """-..-""", """Y""": """-.--""", """Z""": """--..""", """1""": """.----""",
"""2""": """..---""", """3""": """...--""", """4""": """....-""", """5""": """.....""", """6""": """-....""", """7""": """--...""",
"""8""": """---..""", """9""": """----.""", """0""": """-----""", """&""": """.-...""", """@""": """.--.-.""",
""":""": """---...""", """,""": """--..--""", """.""": """.-.-.-""", """'""": """.----.""", """\"""": """.-..-.""",
"""?""": """..--..""", """/""": """-..-.""", """=""": """-...-""", """+""": """.-.-.""", """-""": """-....-""",
"""(""": """-.--.""", """)""": """-.--.-""", """!""": """-.-.--""", """ """: """/"""
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt(message: str) -> str:
    """simple docstring"""
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())
def decrypt(message: str) -> str:
    """simple docstring"""
    return "".join(REVERSE_DICT[char] for char in message.split())
def main() -> None:
    """simple docstring"""
    message = 'Morse code here!'
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)
if __name__ == "__main__":
main()
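A quick check of the two helpers, with expected values read directly from the table above:
assert encrypt('SOS') == '... --- ...'
assert decrypt('... --- ...') == 'SOS'
assert decrypt(encrypt('HELLO WORLD')) == 'HELLO WORLD'  # spaces round-trip through '/'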
| 5 | 1 |
'''simple docstring'''
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : str ,_a : List[str] ,_a : int=sys.maxsize ):
'''simple docstring'''
_a : Any = 'bilinear'
_a : List[Any] = max_size
_a : List[str] = short_edge_length
def __call__( self : List[str] ,_a : int ):
'''simple docstring'''
_a : Optional[int] = []
for img in imgs:
_a, _a : List[Any] = img.shape[:2]
# later: provide list and randomly choose index for resize
_a : str = np.random.randint(self.short_edge_length[0] ,self.short_edge_length[1] + 1 )
if size == 0:
return img
_a : Dict = size * 1.0 / min(_a ,_a )
if h < w:
_a, _a : Optional[Any] = size, scale * w
else:
_a, _a : List[str] = scale * h, size
if max(_a ,_a ) > self.max_size:
_a : Dict = self.max_size * 1.0 / max(_a ,_a )
_a : List[Any] = newh * scale
_a : List[str] = neww * scale
_a : List[str] = int(neww + 0.5 )
_a : Optional[Any] = int(newh + 0.5 )
            if img.dtype == np.uint8:
_a : Dict = Image.fromarray(_a )
_a : str = pil_image.resize((neww, newh) ,PILImageResampling.BILINEAR )
_a : List[str] = np.asarray(_a )
else:
                _a : int = img.permute(2 ,0 ,1 ).unsqueeze(0 )  # hwc -> nchw
_a : Optional[Any] = nn.functional.interpolate(
_a ,(newh, neww) ,mode=self.interp_method ,align_corners=_a ).squeeze(0 )
img_augs.append(_a )
return img_augs
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : Union[str, Any] ,_a : List[str] ):
'''simple docstring'''
_a : List[Any] = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] ,cfg.INPUT.MAX_SIZE_TEST )
_a : Optional[int] = cfg.INPUT.FORMAT
_a : Optional[int] = cfg.SIZE_DIVISIBILITY
_a : Optional[Any] = cfg.PAD_VALUE
_a : Optional[Any] = cfg.INPUT.MAX_SIZE_TEST
_a : Tuple = cfg.MODEL.DEVICE
_a : Tuple = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) ,1 ,1 )
_a : Any = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) ,1 ,1 )
        _a : Tuple = lambda x: (x - self.pixel_mean) / self.pixel_std
def __lowercase ( self : Optional[int] ,_a : Dict ):
'''simple docstring'''
        _a : List[Any] = tuple(max(s ) for s in zip(*[img.shape for img in images] ) )
_a : Tuple = [im.shape[-2:] for im in images]
_a : str = [
nn.functional.pad(
_a ,[0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] ,value=self.pad_value ,)
for size, im in zip(_a ,_a )
]
return torch.stack(_a ), torch.tensor(_a )
def __call__( self : Union[str, Any] ,_a : List[Any] ,_a : Optional[int]=False ):
'''simple docstring'''
with torch.no_grad():
if not isinstance(_a ,_a ):
_a : str = [images]
if single_image:
assert len(_a ) == 1
for i in range(len(_a ) ):
if isinstance(images[i] ,torch.Tensor ):
images.insert(_a ,images.pop(_a ).to(self.device ).float() )
elif not isinstance(images[i] ,torch.Tensor ):
images.insert(
_a ,torch.as_tensor(img_tensorize(images.pop(_a ) ,input_format=self.input_format ) )
.to(self.device )
.float() ,)
# resize smallest edge
_a : Optional[int] = torch.tensor([im.shape[:2] for im in images] )
_a : int = self.aug(_a )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
_a : int = [self.normalizer(_a ) for x in images]
# now pad them to do the following operations
_a, _a : Optional[Any] = self.pad(_a )
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
_a : List[Any] = torch.true_divide(_a ,_a )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def UpperCAmelCase_ (__a : str , __a : Union[str, Any] ):
"""simple docstring"""
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def UpperCAmelCase_ (__a : Dict , __a : Tuple[int, int] ):
"""simple docstring"""
assert torch.isfinite(__a ).all(), "Box tensor contains infinite or NaN!"
_a, _a : Dict = box_size
tensor[:, 0].clamp_(min=0 , max=__a )
tensor[:, 1].clamp_(min=0 , max=__a )
tensor[:, 2].clamp_(min=0 , max=__a )
tensor[:, 3].clamp_(min=0 , max=__a )
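The two functions above rescale box coordinates back to the original image frame and clamp them inside the image. A small standalone sketch of the same tensor operations (hedged: independent of the cfg plumbing above, with a square 100x100 image for brevity):
import torch

boxes = torch.tensor([[10.0, 20.0, 110.0, 220.0]])  # (x1, y1, x2, y2)
scale_yx = torch.tensor([[0.5, 0.25]])               # per-image (scale_y, scale_x)
boxes[:, 0::2] *= scale_yx[:, 1]  # x coordinates scaled by scale_x
boxes[:, 1::2] *= scale_yx[:, 0]  # y coordinates scaled by scale_y
boxes.clamp_(min=0, max=100)      # keep boxes inside the image bounds
print(boxes)  # tensor([[  2.5000,  10.0000,  27.5000, 100.0000]])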
| 5 |
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
__lowerCAmelCase = {
"""return_dict""": False,
"""output_hidden_states""": True,
"""output_attentions""": True,
"""torchscript""": True,
"""torch_dtype""": """float16""",
"""use_bfloat16""": True,
"""tf_legacy_loss""": True,
"""pruned_heads""": {"""a""": 1},
"""tie_word_embeddings""": False,
"""is_decoder""": True,
"""cross_attention_hidden_size""": 1_2_8,
"""add_cross_attention""": True,
"""tie_encoder_decoder""": True,
"""max_length""": 5_0,
"""min_length""": 3,
"""do_sample""": True,
"""early_stopping""": True,
"""num_beams""": 3,
"""num_beam_groups""": 3,
"""diversity_penalty""": 0.5,
"""temperature""": 2.0,
"""top_k""": 1_0,
"""top_p""": 0.7,
"""typical_p""": 0.2,
"""repetition_penalty""": 0.8,
"""length_penalty""": 0.8,
"""no_repeat_ngram_size""": 5,
"""encoder_no_repeat_ngram_size""": 5,
"""bad_words_ids""": [1, 2, 3],
"""num_return_sequences""": 3,
"""chunk_size_feed_forward""": 5,
"""output_scores""": True,
"""return_dict_in_generate""": True,
"""forced_bos_token_id""": 2,
"""forced_eos_token_id""": 3,
"""remove_invalid_values""": True,
"""architectures""": ["""BertModel"""],
"""finetuning_task""": """translation""",
"""id2label""": {0: """label"""},
"""label2id""": {"""label""": """0"""},
"""tokenizer_class""": """BertTokenizerFast""",
"""prefix""": """prefix""",
"""bos_token_id""": 6,
"""pad_token_id""": 7,
"""eos_token_id""": 8,
"""sep_token_id""": 9,
"""decoder_start_token_id""": 1_0,
"""exponential_decay_length_penalty""": (5, 1.01),
"""suppress_tokens""": [0, 1],
"""begin_suppress_tokens""": 2,
"""task_specific_params""": {"""translation""": """some_params"""},
"""problem_type""": """regression""",
}
@is_staging_test
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def __lowercase ( cls : Optional[Any] ):
'''simple docstring'''
_a : List[Any] = TOKEN
HfFolder.save_token(_a )
@classmethod
def __lowercase ( cls : List[Any] ):
'''simple docstring'''
try:
delete_repo(token=cls._token ,repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='test-dynamic-config' )
except HTTPError:
pass
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : Any = BertConfig(
vocab_size=99 ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=37 )
config.push_to_hub('test-config' ,use_auth_token=self._token )
_a : Optional[Any] = BertConfig.from_pretrained(F"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a ,getattr(_a ,_a ) )
# Reset repo
delete_repo(token=self._token ,repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_a ,repo_id='test-config' ,push_to_hub=_a ,use_auth_token=self._token )
_a : Dict = BertConfig.from_pretrained(F"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a ,getattr(_a ,_a ) )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Tuple = BertConfig(
vocab_size=99 ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=37 )
config.push_to_hub('valid_org/test-config-org' ,use_auth_token=self._token )
_a : Union[str, Any] = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a ,getattr(_a ,_a ) )
# Reset repo
delete_repo(token=self._token ,repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
_a ,repo_id='valid_org/test-config-org' ,push_to_hub=_a ,use_auth_token=self._token )
_a : Tuple = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a ,getattr(_a ,_a ) )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
CustomConfig.register_for_auto_class()
_a : Optional[Any] = CustomConfig(attribute=42 )
config.push_to_hub('test-dynamic-config' ,use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map ,{'AutoConfig': 'custom_configuration.CustomConfig'} )
_a : int = AutoConfig.from_pretrained(F"""{USER}/test-dynamic-config""" ,trust_remote_code=_a )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ ,'CustomConfig' )
self.assertEqual(new_config.attribute ,42 )
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : Optional[Any] = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
_a : int = c.n_embd + 1 # int
_a : str = c.resid_pdrop + 1.0 # float
_a : Dict = not c.scale_attn_weights # bool
_a : List[Any] = c.summary_type + 'foo' # str
c.update_from_string(
F"""n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}""" )
self.assertEqual(_a ,c.n_embd ,'mismatch for key: n_embd' )
self.assertEqual(_a ,c.resid_pdrop ,'mismatch for key: resid_pdrop' )
self.assertEqual(_a ,c.scale_attn_weights ,'mismatch for key: scale_attn_weights' )
self.assertEqual(_a ,c.summary_type ,'mismatch for key: summary_type' )
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : int = PretrainedConfig()
_a : int = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
_a ,['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
_a : Dict = [key for key, value in config_common_kwargs.items() if value == getattr(_a ,_a )]
if len(_a ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs` pick another value for them:'
F""" {', '.join(_a )}.""" )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
with self.assertRaises(_a ):
# config is in subfolder, the following should not work without specifying the subfolder
_a : List[Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
_a : List[str] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' ,subfolder='bert' )
self.assertIsNotNone(_a )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
_a : List[Any] = mock.Mock()
_a : Any = 500
_a : Any = {}
_a : Any = HTTPError
_a : List[Any] = {}
# Download this model to make sure it's in the cache.
_a : Any = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request' ,return_value=_a ) as mock_head:
_a : Optional[int] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
        # This check verifies that we did call the fake head request
mock_head.assert_called()
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Optional[int] = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : int = AutoConfig.from_pretrained('bert-base-cased' )
_a : List[str] = ['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(_a )
_a : str = 2
json.dump(configuration.to_dict() ,open(os.path.join(_a ,'config.4.0.0.json' ) ,'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
_a : int = AutoConfig.from_pretrained(_a )
self.assertEqual(new_configuration.hidden_size ,2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
_a : Tuple = ['config.42.0.0.json']
_a : int = 768
configuration.save_pretrained(_a )
shutil.move(os.path.join(_a ,'config.4.0.0.json' ) ,os.path.join(_a ,'config.42.0.0.json' ) )
_a : int = AutoConfig.from_pretrained(_a )
self.assertEqual(new_configuration.hidden_size ,768 )
def __lowercase ( self : str ):
'''simple docstring'''
_a : Tuple = 'hf-internal-testing/test-two-configs'
import transformers as new_transformers
_a : Optional[int] = 'v4.0.0'
_a, _a : Tuple = new_transformers.models.auto.AutoConfig.from_pretrained(
_a ,return_unused_kwargs=_a )
self.assertEqual(new_configuration.hidden_size ,2 )
        # This checks that `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(_a ,{} )
        # Testing an older version by monkey-patching the version in the module where it's used.
import transformers as old_transformers
_a : str = 'v3.0.0'
_a : Optional[Any] = old_transformers.models.auto.AutoConfig.from_pretrained(_a )
self.assertEqual(old_configuration.hidden_size ,768 )
| 5 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__lowerCAmelCase = {
'''configuration_pix2struct''': [
'''PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Pix2StructConfig''',
'''Pix2StructTextConfig''',
'''Pix2StructVisionConfig''',
],
'''processing_pix2struct''': ['''Pix2StructProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ['''Pix2StructImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'''PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Pix2StructPreTrainedModel''',
'''Pix2StructForConditionalGeneration''',
'''Pix2StructVisionModel''',
'''Pix2StructTextModel''',
]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 350 |
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
"""169M""": 1_2,
"""430M""": 2_4,
"""1B5""": 2_4,
"""3B""": 3_2,
"""7B""": 3_2,
"""14B""": 4_0,
}
HIDDEN_SIZE_MAPPING = {
"""169M""": 7_6_8,
"""430M""": 1_0_2_4,
"""1B5""": 2_0_4_8,
"""3B""": 2_5_6_0,
"""7B""": 4_0_9_6,
"""14B""": 5_1_2_0,
}
def convert_state_dict(state_dict):
    """simple docstring"""
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith('emb.'):
            name = name.replace('emb.', 'embeddings.')
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith('blocks.0.ln0'):
            name = name.replace('blocks.0.ln0', 'blocks.0.pre_ln')
        # att -> attention
        name = re.sub(R'blocks\.(\d+)\.att', R'blocks.\1.attention', name)
        # ffn -> feed_forward
        name = re.sub(R'blocks\.(\d+)\.ffn', R'blocks.\1.feed_forward', name)
        # time_mix_k -> time_mix_key and reshape
        if name.endswith('.time_mix_k'):
            name = name.replace('.time_mix_k', '.time_mix_key')
        # time_mix_v -> time_mix_value and reshape
        if name.endswith('.time_mix_v'):
            name = name.replace('.time_mix_v', '.time_mix_value')
        # time_mix_r -> time_mix_receptance and reshape
        if name.endswith('.time_mix_r'):
            name = name.replace('.time_mix_r', '.time_mix_receptance')
        if name != "head.weight":
            name = 'rwkv.' + name
        state_dict[name] = weight
    return state_dict
def convert_rwkv_checkpoint_to_hf_format(repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None):
    """simple docstring"""
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print('No `--tokenizer_file` provided, we will use the default tokenizer.')
        vocab_size = 5_0_2_7_7
        tokenizer = AutoTokenizer.from_pretrained('EleutherAI/gpt-neox-20b')
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)
    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError('Could not infer the size, please provide it with the `--size` argument.')
    if size not in possible_sizes:
        raise ValueError(f"""`size` should be one of {possible_sizes}, got {size}.""")
    config = RwkvConfig(
        vocab_size=vocab_size, num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size], hidden_size=HIDDEN_SIZE_MAPPING[size], )
    config.save_pretrained(output_dir)
    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location='cpu')
    state_dict = convert_state_dict(state_dict)
    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))
    if index is not None:
        index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(index_file, 'w', encoding='utf-8') as f:
            content = json.dumps(index, indent=2, sort_keys=True) + '\n'
            f.write(content)
    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        'Cleaning up shards. This may error with an OOM error, if this is the case don\'t worry, you still have converted the model.')
    shard_files = list(shards.keys())
    del state_dict
    del shards
    gc.collect()
    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))
        del state_dict
        gc.collect()
    if push_to_hub:
        if model_name is None:
            raise ValueError('Please provide a `model_name` to push the model to the Hub.')
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size='2GB')
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--repo_id""", default=None, type=str, required=True, help="""Repo ID from which to pull the checkpoint."""
)
parser.add_argument(
"""--checkpoint_file""", default=None, type=str, required=True, help="""Name of the checkpoint file in the repo."""
)
parser.add_argument(
"""--output_dir""", default=None, type=str, required=True, help="""Where to save the converted model."""
)
parser.add_argument(
"""--tokenizer_file""",
default=None,
type=str,
help="""Path to the tokenizer file to use (if not provided, only the model is converted).""",
)
parser.add_argument(
"""--size""",
default=None,
type=str,
help="""Size of the model. Will be inferred from the `checkpoint_file` if not passed.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Push to the Hub the converted model.""",
)
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""Name of the pushed model on the Hub, including the username / organization.""",
)
__lowerCAmelCase = parser.parse_args()
    convert_rwkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
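A hypothetical invocation of the script above; the repo and checkpoint file names here are illustrative, not verified:
# python convert_rwkv_checkpoint_to_hf.py \
#     --repo_id BlinkDL/rwkv-4-pile-169m \
#     --checkpoint_file RWKV-4-Pile-169M-20220807-8023.pth \
#     --output_dir ./rwkv-4-169m-hf \
#     --size 169M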
| 5 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
"""facebook/convnextv2-tiny-1k-224""": """https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json""",
}
class UpperCAmelCase__ ( A__ , A__ ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = "convnextv2"
def __init__( self : Any ,_a : str=3 ,_a : Union[str, Any]=4 ,_a : Union[str, Any]=4 ,_a : Optional[int]=None ,_a : Optional[int]=None ,_a : List[str]="gelu" ,_a : str=0.02 ,_a : Dict=1E-12 ,_a : List[Any]=0.0 ,_a : Union[str, Any]=224 ,_a : Optional[int]=None ,_a : Tuple=None ,**_a : Tuple ,):
'''simple docstring'''
super().__init__(**__A )
_a : str = num_channels
_a : Tuple = patch_size
_a : str = num_stages
_a : Tuple = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
_a : Optional[Any] = [3, 3, 9, 3] if depths is None else depths
_a : Optional[int] = hidden_act
_a : int = initializer_range
_a : List[Any] = layer_norm_eps
_a : List[Any] = drop_path_rate
_a : Any = image_size
_a : Union[str, Any] = ["""stem"""] + [F"""stage{idx}""" for idx in range(1 ,len(self.depths ) + 1 )]
_a : Optional[Any] = get_aligned_output_features_output_indices(
out_features=__A ,out_indices=__A ,stage_names=self.stage_names )
| 351 |
'''simple docstring'''
import comet # From: unbabel-comet
import torch
import datasets
__lowerCAmelCase = datasets.logging.get_logger(__name__)
__lowerCAmelCase = """\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel's Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = \"{COMET}: A Neural Framework for {MT} Evaluation\",
author = \"Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon\",
booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",
month = nov,
year = \"2020\",
address = \"Online\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",
pages = \"2685--2702\",
}
"""
__lowerCAmelCase = """\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task, achieving SOTA in that year's competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
"""
__lowerCAmelCase = """
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric('comet')
>>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use
>>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]
>>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]
>>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results[\"scores\"]])
[0.19, 0.92]
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase__ ( datasets.Metric ):
"""simple docstring"""
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,homepage='https://unbabel.github.io/COMET/html/index.html' ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'sources': datasets.Value('string' ,id='sequence' ),
'predictions': datasets.Value('string' ,id='sequence' ),
'references': datasets.Value('string' ,id='sequence' ),
} ) ,codebase_urls=['https://github.com/Unbabel/COMET'] ,reference_urls=[
'https://github.com/Unbabel/COMET',
'https://www.aclweb.org/anthology/2020.emnlp-main.213/',
            'http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf',
] ,)
def __lowercase ( self : int ,_a : int ):
'''simple docstring'''
if self.config_name == "default":
_a : List[Any] = comet.load_from_checkpoint(comet.download_model('wmt20-comet-da' ) )
else:
_a : List[str] = comet.load_from_checkpoint(comet.download_model(self.config_name ) )
def __lowercase ( self : Tuple ,_a : List[Any] ,_a : Dict ,_a : Optional[Any] ,_a : List[str]=None ,_a : Tuple=False ):
'''simple docstring'''
if gpus is None:
_a : str = 1 if torch.cuda.is_available() else 0
_a : Optional[Any] = {'src': sources, 'mt': predictions, 'ref': references}
_a : Optional[Any] = [dict(zip(_a ,_a ) ) for t in zip(*data.values() )]
_a, _a : Tuple = self.scorer.predict(_a ,gpus=_a ,progress_bar=_a )
return {"mean_score": mean_score, "scores": scores}
| 5 | 0 |
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
__lowerCAmelCase = [
"""good first issue""",
"""feature request""",
"""wip""",
]
def main():
    """simple docstring"""
    g = Github(os.environ['GITHUB_TOKEN'])
    repo = g.get_repo('huggingface/accelerate')
    open_issues = repo.get_issues(state='open')
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state='closed' )
elif (
days_since_updated > 2_3
and days_since_creation >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
if __name__ == "__main__":
main()
| 352 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowerCAmelCase = {
"""configuration_squeezebert""": [
"""SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SqueezeBertConfig""",
"""SqueezeBertOnnxConfig""",
],
"""tokenization_squeezebert""": ["""SqueezeBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ["""SqueezeBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
"""SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SqueezeBertForMaskedLM""",
"""SqueezeBertForMultipleChoice""",
"""SqueezeBertForQuestionAnswering""",
"""SqueezeBertForSequenceClassification""",
"""SqueezeBertForTokenClassification""",
"""SqueezeBertModel""",
"""SqueezeBertModule""",
"""SqueezeBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 5 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__lowerCAmelCase = {
"""configuration_mega""": ["""MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegaConfig""", """MegaOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
"""MEGA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MegaForCausalLM""",
"""MegaForMaskedLM""",
"""MegaForMultipleChoice""",
"""MegaForQuestionAnswering""",
"""MegaForSequenceClassification""",
"""MegaForTokenClassification""",
"""MegaModel""",
"""MegaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 353 |
'''simple docstring'''
import sys
def matrix_chain_order(array):
    """simple docstring"""
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]
    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol
def print_optimal_solution(optimal_solution, i, j):
    """simple docstring"""
    if i == j:
        print('A' + str(i), end=' ')
    else:
        print('(', end=' ')
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(')', end=' ')
def main():
    """simple docstring"""
    array = [3_0, 3_5, 1_5, 5, 1_0, 2_0, 2_5]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, sol = matrix_chain_order(array)
    print('No. of Operation required: ' + str(matrix[1][n - 1]))
    print_optimal_solution(sol, 1, n - 1)
if __name__ == "__main__":
main()
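For the sample dimensions above (the classic CLRS Section 15.2 instance), the expected output is:
# No. of Operation required: 15125
# ( ( A1 ( A2 A3 ) ) ( ( A4 A5 ) A6 ) )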
| 5 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    """simple docstring"""
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        '''simple docstring'''
        if len(coefficients) != degree + 1:
            raise ValueError(
                'The number of coefficients should be equal to the degree + 1.')
        self.coefficients: list[float] = list(coefficients)
        self.degree = degree
    def __add__(self, polynomial_a: Polynomial) -> Polynomial:
        '''simple docstring'''
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree, coefficients)
    def __sub__(self, polynomial_a: Polynomial) -> Polynomial:
        '''simple docstring'''
        return self + polynomial_a * Polynomial(0, [-1])
    def __neg__(self) -> Polynomial:
        '''simple docstring'''
        return Polynomial(self.degree, [-c for c in self.coefficients])
    def __mul__(self, polynomial_a: Polynomial) -> Polynomial:
        '''simple docstring'''
        coefficients: list[float] = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_a.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_a.degree, coefficients)
    def evaluate(self, substitution: int | float) -> int | float:
        '''simple docstring'''
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result
    def __str__(self) -> str:
        '''simple docstring'''
        polynomial = ''
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += ' + '
            else:
                polynomial += ' - '
            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + 'x'
            else:
                polynomial += str(abs(self.coefficients[i])) + 'x^' + str(i)
        return polynomial
    def __repr__(self) -> str:
        '''simple docstring'''
        return self.__str__()
    def derivative(self) -> Polynomial:
        '''simple docstring'''
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)
    def integral(self, constant: int | float = 0) -> Polynomial:
        '''simple docstring'''
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)
    def __eq__(self, polynomial_a: object) -> bool:
        '''simple docstring'''
        if not isinstance(polynomial_a, Polynomial):
            return False
        if self.degree != polynomial_a.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False
        return True
    def __ne__(self, polynomial_a: object) -> bool:
        '''simple docstring'''
        return not self.__eq__(polynomial_a)
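A short driver sketch for the class above (hypothetical, not part of the original module; the printed forms follow `__str__` as defined, so a coefficient of 1 renders as '1x'):
p = Polynomial(2, [1, 2, 3])   # coefficients[i] multiplies x^i, so this is 3x^2 + 2x + 1
q = Polynomial(1, [0, 1])      # x
print(p + q)                   # 3x^2 + 3x + 1
print(p * q)                   # 3x^3 + 2x^2 + 1x
assert p.evaluate(2) == 17     # 3*4 + 2*2 + 1
print(p.derivative())          # 6x + 2
print(p.integral(constant=5))  # 1.0x^3 + 1.0x^2 + 1.0x + 5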
| 354 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    """simple docstring"""
    locka = FileLock(str(tmpdir / 'foo.lock'))
    lockb = FileLock(str(tmpdir / 'foo.lock'))
    timeout = 0.01
    with locka.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lockb.acquire(timeout)
        assert time.time() - _start > timeout
def test_long_lock_filename(tmpdir):
    """simple docstring"""
    filename = 'a' * 1_0_0_0 + '.lock'
    locka = FileLock(str(tmpdir / filename))
    assert locka._lock_file.endswith('.lock')
    assert not locka._lock_file.endswith(filename)
    assert len(os.path.basename(locka._lock_file)) <= 2_5_5
    lockb = FileLock(tmpdir / filename)
    with locka.acquire():
        with pytest.raises(Timeout):
            lockb.acquire(0)
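For context, a minimal sketch of the behavior these tests exercise, using the same `FileLock` API as imported above:
lock = FileLock('/tmp/example.lock')
with lock.acquire(timeout=1):
    # critical section: a second holder of the same lock path would raise Timeout here
    pass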
| 5 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
"""microsoft/resnet-50""": """https://huggingface.co/microsoft/resnet-50/blob/main/config.json""",
}
class UpperCAmelCase__ ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = 'resnet'
__UpperCAmelCase : int = ['basic', 'bottleneck']
def __init__( self : Dict ,_a : Any=3 ,_a : str=64 ,_a : str=[256, 512, 1024, 2048] ,_a : Union[str, Any]=[3, 4, 6, 3] ,_a : List[Any]="bottleneck" ,_a : Union[str, Any]="relu" ,_a : str=False ,_a : List[str]=None ,_a : Tuple=None ,**_a : Dict ,):
'''simple docstring'''
super().__init__(**_a )
if layer_type not in self.layer_types:
raise ValueError(F"""layer_type={layer_type} is not one of {','.join(self.layer_types )}""" )
_a : Optional[int] = num_channels
_a : Optional[Any] = embedding_size
_a : Optional[int] = hidden_sizes
_a : Optional[int] = depths
_a : Dict = layer_type
_a : Optional[int] = hidden_act
_a : Dict = downsample_in_first_stage
_a : Dict = ['stem'] + [F"""stage{idx}""" for idx in range(1 ,len(_a ) + 1 )]
_a, _a : int = get_aligned_output_features_output_indices(
out_features=_a ,out_indices=_a ,stage_names=self.stage_names )
class UpperCAmelCase__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCAmelCase : Tuple = version.parse('''1.11''' )
@property
def __lowercase ( self : List[str] ):
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
return 1E-3
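A brief usage sketch for this configuration, via the public transformers API that this file feeds into (values illustrative):
from transformers import ResNetConfig, ResNetModel

config = ResNetConfig(layer_type='bottleneck', depths=[3, 4, 6, 3])
model = ResNetModel(config)  # randomly initialized ResNet-50-style backbone
print(config.stage_names)    # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']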
| 355 |
'''simple docstring'''
def solution(min_total: int = 1_0**1_2) -> int:
    """simple docstring"""
    prev_numerator = 1
    prev_denominator = 0
    numerator = 1
    denominator = 1
    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator
        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator
    return (denominator + 1) // 2
if __name__ == "__main__":
print(f'''{solution() = }''')
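The recurrence walks the known chain of "arranged probability" solutions (3 blue of 4, 15 of 21, 85 of 120, 493 of 697, ...), returning the blue-disc count of the first arrangement whose total exceeds `min_total`. A quick sanity check against those known pairs:
assert solution(min_total=4) == 15     # next arrangement after 4 total discs: 15 blue of 21
assert solution(min_total=21) == 85    # then 85 blue of 120
assert solution(min_total=120) == 493  # then 493 blue of 697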
| 5 | 0 |
'''simple docstring'''
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
def __lowercase ( self : int ):
'''simple docstring'''
_a : Any = pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type ,pa.intaa() )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
with self.assertRaises(_snake_case ):
_a : List[Any] = pa.array(TypedSequence([1, 2, 3] ) ,type=pa.intaa() )
def __lowercase ( self : Dict ):
'''simple docstring'''
with self.assertRaises(_snake_case ):
_a : Optional[Any] = pa.array(TypedSequence([1, 2, 3] ,try_type=Value('bool' ) ,type=Value('int64' ) ) )
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : Dict = pa.array(TypedSequence([1, 2, 3] ,type=Value('int32' ) ) )
self.assertEqual(arr.type ,pa.intaa() )
def __lowercase ( self : Dict ):
'''simple docstring'''
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
_a : str = pa.array(TypedSequence(['foo', 'bar'] ,type=Value('int64' ) ) )
def __lowercase ( self : Tuple ):
'''simple docstring'''
_a : List[Any] = pa.array(TypedSequence([1, 2, 3] ,try_type=Value('int32' ) ) )
self.assertEqual(arr.type ,pa.intaa() )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
_a : Tuple = pa.array(TypedSequence(['foo', 'bar'] ,try_type=Value('int64' ) ) )
self.assertEqual(arr.type ,pa.string() )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : Dict = pa.array(TypedSequence([[[1, 2, 3]]] ,type=ArrayaD((1, 3) ,'int64' ) ) )
self.assertEqual(arr.type ,ArrayaDExtensionType((1, 3) ,'int64' ) )
def __lowercase ( self : Tuple ):
'''simple docstring'''
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
_a : List[str] = pa.array(TypedSequence(['foo', 'bar'] ,type=ArrayaD((1, 3) ,'int64' ) ) )
def __lowercase ( self : Tuple ):
'''simple docstring'''
_a : Optional[int] = pa.array(TypedSequence([[[1, 2, 3]]] ,try_type=ArrayaD((1, 3) ,'int64' ) ) )
self.assertEqual(arr.type ,ArrayaDExtensionType((1, 3) ,'int64' ) )
def __lowercase ( self : Dict ):
'''simple docstring'''
_a : List[str] = pa.array(TypedSequence(['foo', 'bar'] ,try_type=ArrayaD((1, 3) ,'int64' ) ) )
self.assertEqual(arr.type ,pa.string() )
@require_pil
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
import PIL.Image
        _a : str = PIL.Image.fromarray(np.arange(10 ,dtype=np.uint8 ).reshape(2 ,5 ) )
with patch(
'datasets.arrow_writer.cast_to_python_objects' ,side_effect=_snake_case ) as mock_cast_to_python_objects:
_a : int = pa.array(TypedSequence([{'path': None, 'bytes': B'image_bytes'}, pil_image] ,type=Image() ) )
_a, _a : List[Any] = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn('optimize_list_casting' ,_snake_case )
self.assertFalse(kwargs['optimize_list_casting'] )
def UpperCAmelCase_ (__a : Tuple , __a : int ):
"""simple docstring"""
_a : Dict = pa.BufferReader(__A ) if isinstance(__A , pa.Buffer ) else pa.memory_map(__A )
_a : List[Any] = pa.ipc.open_stream(__A )
_a : Any = f.read_all()
assert len(pa_table.to_batches() ) == expected_num_chunks
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
del pa_table
@pytest.mark.parametrize('writer_batch_size' , [None, 1, 1_0] )
@pytest.mark.parametrize(
'fields' , [None, {'col_1': pa.string(), 'col_2': pa.intaa()}, {'col_1': pa.string(), 'col_2': pa.intaa()}] )
def UpperCAmelCase_ (__a : List[str] , __a : Union[str, Any] ):
"""simple docstring"""
_a : Optional[Any] = pa.BufferOutputStream()
_a : List[Any] = pa.schema(__A ) if fields else None
with ArrowWriter(stream=__A , schema=__A , writer_batch_size=__A ) as writer:
writer.write({'col_1': 'foo', 'col_2': 1} )
writer.write({'col_1': 'bar', 'col_2': 2} )
_a, _a : Any = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_a : Optional[Any] = {'col_1': pa.string(), 'col_2': pa.intaa()}
assert writer._schema == pa.schema(__A , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def UpperCAmelCase_ ():
"""simple docstring"""
_a : Dict = pa.BufferOutputStream()
_a : str = Features({'labels': ClassLabel(names=['neg', 'pos'] )} )
with ArrowWriter(stream=__A , features=__A ) as writer:
writer.write({'labels': 0} )
writer.write({'labels': 1} )
_a, _a : int = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
_a : Any = pa.BufferReader(output.getvalue() )
_a : Union[str, Any] = pa.ipc.open_stream(__A )
_a : int = f.read_all()
_a : List[str] = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(__A )
@pytest.mark.parametrize('writer_batch_size' , [None, 1, 1_0] )
def UpperCAmelCase_ (__a : Tuple ):
"""simple docstring"""
_a : Optional[int] = pa.BufferOutputStream()
with ArrowWriter(
stream=__A , writer_batch_size=__A , hash_salt='split_name' , check_duplicates=__A , ) as writer:
with pytest.raises(__A ):
writer.write({'col_1': 'foo', 'col_2': 1} , key=[1, 2] )
_a, _a : List[Any] = writer.finalize()
@pytest.mark.parametrize('writer_batch_size' , [None, 2, 1_0] )
def UpperCAmelCase_ (__a : Union[str, Any] ):
"""simple docstring"""
_a : Optional[int] = pa.BufferOutputStream()
with ArrowWriter(
stream=__A , writer_batch_size=__A , hash_salt='split_name' , check_duplicates=__A , ) as writer:
with pytest.raises(__A ):
writer.write({'col_1': 'foo', 'col_2': 1} , key=1_0 )
writer.write({'col_1': 'bar', 'col_2': 2} , key=1_0 )
_a, _a : int = writer.finalize()
@pytest.mark.parametrize('writer_batch_size' , [None, 2, 1_0] )
def UpperCAmelCase_ (__a : Optional[int] ):
"""simple docstring"""
_a : List[str] = pa.BufferOutputStream()
with ArrowWriter(
stream=__A , writer_batch_size=__A , hash_salt='split_name' , check_duplicates=__A , ) as writer:
writer.write({'col_1': 'foo', 'col_2': 1} , key=1 )
writer.write({'col_1': 'bar', 'col_2': 2} , key=2 )
_a, _a : List[Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('writer_batch_size' , [None, 1, 1_0] )
@pytest.mark.parametrize(
'fields' , [None, {'col_1': pa.string(), 'col_2': pa.intaa()}, {'col_1': pa.string(), 'col_2': pa.intaa()}] )
def UpperCAmelCase_ (__a : Optional[int] , __a : Any ):
"""simple docstring"""
_a : Optional[int] = pa.BufferOutputStream()
_a : Union[str, Any] = pa.schema(__A ) if fields else None
with ArrowWriter(stream=__A , schema=__A , writer_batch_size=__A ) as writer:
writer.write_batch({'col_1': ['foo', 'bar'], 'col_2': [1, 2]} )
writer.write_batch({'col_1': [], 'col_2': []} )
_a, _a : Optional[Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_a : List[Any] = {'col_1': pa.string(), 'col_2': pa.intaa()}
assert writer._schema == pa.schema(__A , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('writer_batch_size' , [None, 1, 1_0] )
@pytest.mark.parametrize(
'fields' , [None, {'col_1': pa.string(), 'col_2': pa.intaa()}, {'col_1': pa.string(), 'col_2': pa.intaa()}] )
def UpperCAmelCase_ (__a : Any , __a : Optional[int] ):
"""simple docstring"""
_a : Any = pa.BufferOutputStream()
_a : Union[str, Any] = pa.schema(__A ) if fields else None
with ArrowWriter(stream=__A , schema=__A , writer_batch_size=__A ) as writer:
writer.write_table(pa.Table.from_pydict({'col_1': ['foo', 'bar'], 'col_2': [1, 2]} ) )
_a, _a : Tuple = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_a : Optional[Any] = {'col_1': pa.string(), 'col_2': pa.intaa()}
assert writer._schema == pa.schema(__A , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('writer_batch_size' , [None, 1, 1_0] )
@pytest.mark.parametrize(
'fields' , [None, {'col_1': pa.string(), 'col_2': pa.intaa()}, {'col_1': pa.string(), 'col_2': pa.intaa()}] )
def UpperCAmelCase_ (__a : List[str] , __a : Optional[int] ):
"""simple docstring"""
_a : Optional[Any] = pa.BufferOutputStream()
_a : Union[str, Any] = pa.schema(__A ) if fields else None
with ArrowWriter(stream=__A , schema=__A , writer_batch_size=__A ) as writer:
writer.write_row(pa.Table.from_pydict({'col_1': ['foo'], 'col_2': [1]} ) )
writer.write_row(pa.Table.from_pydict({'col_1': ['bar'], 'col_2': [2]} ) )
_a, _a : Tuple = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_a : Optional[Any] = {'col_1': pa.string(), 'col_2': pa.intaa()}
assert writer._schema == pa.schema(__A , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def UpperCAmelCase_ ():
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
_a : Optional[Any] = {'col_1': pa.string(), 'col_2': pa.intaa()}
_a : str = os.path.join(__A , 'test.arrow' )
with ArrowWriter(path=__A , schema=pa.schema(__A ) ) as writer:
writer.write_batch({'col_1': ['foo', 'bar'], 'col_2': [1, 2]} )
_a, _a : int = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(__A , metadata=writer._schema.metadata )
_check_output(__A , 1 )
def UpperCAmelCase_ (__a : Optional[Any] ):
"""simple docstring"""
if pa.types.is_list(__A ):
return get_base_dtype(arr_type.value_type )
else:
return arr_type
def UpperCAmelCase_ (__a : List[str] , __a : Union[str, Any] ):
"""simple docstring"""
if isinstance(lst[0] , __A ):
change_first_primitive_element_in_list(lst[0] , __A )
else:
_a : List[str] = value
@pytest.mark.parametrize('optimized_int_type, expected_dtype' , [(None, pa.intaa()), (Value('int32' ), pa.intaa())] )
@pytest.mark.parametrize('sequence' , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def UpperCAmelCase_ (__a : int , __a : Optional[int] , __a : Optional[Any] ):
"""simple docstring"""
_a : int = pa.array(TypedSequence(__A , optimized_int_type=__A ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
'col, expected_dtype' , [
('attention_mask', pa.inta()),
('special_tokens_mask', pa.inta()),
('token_type_ids', pa.inta()),
('input_ids', pa.intaa()),
('other', pa.intaa()),
] , )
@pytest.mark.parametrize('sequence' , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def UpperCAmelCase_ (__a : List[Any] , __a : int , __a : List[str] ):
"""simple docstring"""
_a : Tuple = pa.array(OptimizedTypedSequence(__A , col=__A ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
_a : Optional[int] = copy.deepcopy(__A )
_a : List[str] = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(__A , __A )
_a : Any = pa.array(OptimizedTypedSequence(__A , col=__A ) )
assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize('raise_exception' , [False, True] )
def UpperCAmelCase_ (__a : Dict , __a : Dict ):
"""simple docstring"""
_a : Optional[Any] = str(tmp_path / 'dataset-train.arrow' )
try:
with ArrowWriter(path=__A ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def UpperCAmelCase_ (__a : Optional[Any] ):
"""simple docstring"""
_a : int = 'mock://dataset-train.arrow'
with ArrowWriter(path=__A , storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs , type(__A ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({'col_1': 'foo', 'col_2': 1} )
writer.write({'col_1': 'bar', 'col_2': 2} )
_a, _a : List[Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(__A )
def UpperCAmelCase_ ():
"""simple docstring"""
_a : int = pa.BufferOutputStream()
with ParquetWriter(stream=__A ) as writer:
writer.write({'col_1': 'foo', 'col_2': 1} )
writer.write({'col_1': 'bar', 'col_2': 2} )
_a, _a : Tuple = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_a : Union[str, Any] = pa.BufferReader(output.getvalue() )
_a : List[Any] = pq.read_table(__A )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize('embed_local_files' , [False, True] )
def UpperCAmelCase_ (__a : Union[str, Any] , __a : Dict ):
"""simple docstring"""
import PIL.Image
_a : Optional[int] = str(tmp_path / 'test_image_rgb.jpg' )
    PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uint8 ) ).save(__A , format='png' )
_a : Tuple = pa.BufferOutputStream()
with ParquetWriter(
stream=__A , features=Features({'image': Image()} ) , embed_local_files=__A ) as writer:
writer.write({'image': image_path} )
writer.finalize()
_a : int = pa.BufferReader(output.getvalue() )
_a : Union[str, Any] = pq.read_table(__A )
_a : Tuple = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out['image'][0]['path'] , __A )
with open(__A , 'rb' ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def UpperCAmelCase_ ():
"""simple docstring"""
_a : Any = pa.schema([pa.field('col_1' , pa.string() , nullable=__A )] )
_a : Optional[int] = pa.BufferOutputStream()
with ArrowWriter(stream=__A ) as writer:
writer._build_writer(inferred_schema=__A )
assert writer._schema == pa.schema([pa.field('col_1' , pa.string() )] )
| 356 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
    "tokenizer_file": {
        "mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 512}

PRETRAINED_INIT_CONFIGURATION = {}


class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # keep the backend normalizer in sync with the arguments passed to this tokenizer
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
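# Usage sketch for the class above (a hedged example, not part of the original file:
# it assumes network access to the real "google/mobilebert-uncased" checkpoint on the
# Hugging Face Hub, and the sample sentence is made up):
if __name__ == "__main__":
    tok = MobileBertTokenizerFast.from_pretrained("google/mobilebert-uncased")
    enc = tok("Hello world")
    print(enc["input_ids"])  # [CLS] ... [SEP] ids; input is lowercased by default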
| 5 | 0 |
'''simple docstring'''
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class CpmAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "<d>",
            "</d>",
            "<s>",
            "</s>",
            "</_>",
            "<unk>",
            "<pad>",
            "</n>",
            "我",
            "是",
            "C",
            "P",
            "M",
            "A",
            "n",
            "t",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
@tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)

        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens
        input_jieba_tokens = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_jieba_tokens)

        reconstructed_text = tokenizer.decode(input_jieba_tokens)
        self.assertEqual(reconstructed_text, normalized_text)
| 357 |
'''simple docstring'''
def palindromic_string(input_string: str) -> str:
    """
    Manacher's algorithm: finds the longest palindromic substring in linear time.
    """
    max_length = 0

    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741

    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this palindrome end after the previously explored end (that is r)?
        # if yes, update r to the last index of this palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
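    # Quick demonstration of the function above: "ababa" is itself a palindrome,
    # so the longest palindromic substring is the whole string.
    print(palindromic_string("ababa"))  # -> ababa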
| 5 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extract some layers of the full BertForMaskedLM or RobertaForMaskedLM for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="bert", choices=["bert"])
    parser.add_argument("--model_name", default="bert-base-uncased", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_bert-base-uncased_0247911.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
    else:
        raise ValueError('args.model_type should be "bert".')

    state_dict = model.state_dict()
    compressed_sd = {}

    # student-side keys below were reconstructed to follow DistilBertForMaskedLM's
    # parameter naming (the assignment targets were lost in this dump)
    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]

    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1

    compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")
    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
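    # Example invocation (a hedged sketch; the script file name and paths are
    # illustrative, not shipped with this snippet):
    #   python extract_distilbert.py --model_type bert --model_name bert-base-uncased \
    #       --dump_checkpoint serialization_dir/tf_bert-base-uncased_0247911.pth --vocab_transform
    # The resulting state dict can then be loaded into a DistilBertForMaskedLM student
    # before running the distillation training loop.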
| 358 |
'''simple docstring'''
from functools import lru_cache
@lru_cache
def UpperCAmelCase_ (__a : int ):
"""simple docstring"""
if num < 0:
raise ValueError('Number should not be negative.' )
return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
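    # lru_cache memoizes every intermediate result, so once factorial(100) has been
    # computed, factorial(99) and below are O(1) dictionary lookups.
    print(factorial(10))  # 3628800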
| 5 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_pix2struct": [
        "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Pix2StructConfig",
        "Pix2StructTextConfig",
        "Pix2StructVisionConfig",
    ],
    "processing_pix2struct": ["Pix2StructProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pix2struct"] = [
        "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Pix2StructPreTrainedModel",
        "Pix2StructForConditionalGeneration",
        "Pix2StructVisionModel",
        "Pix2StructTextModel",
    ]

if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 359 |
'''simple docstring'''
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None

log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING

_tqdm_active = True


def _get_default_logging_level():
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                f"has to be one of: {', '.join(log_levels.keys())}"
            )
    return _default_log_level


def _get_library_name():
    return __name__.split(".")[0]


def _get_library_root_logger():
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger():
    global _default_handler
    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush

        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False


def _reset_library_root_logger():
    global _default_handler
    with _lock:
        if not _default_handler:
            return
        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None


def get_log_levels_dict():
    return log_levels


def get_logger(name: Optional[str] = None):
    if name is None:
        name = _get_library_name()
    _configure_library_root_logger()
    return logging.getLogger(name)


def get_verbosity():
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int):
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)


def disable_default_handler():
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)


def enable_default_handler():
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)


def add_handler(handler: logging.Handler):
    _configure_library_root_logger()
    assert handler is not None
    _get_library_root_logger().addHandler(handler)


def remove_handler(handler: logging.Handler):
    _configure_library_root_logger()
    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)


def disable_propagation():
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False


def enable_propagation():
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True


def enable_explicit_format():
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
        handler.setFormatter(formatter)


def reset_format():
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        handler.setFormatter(None)


def warning_advice(self, *args, **kwargs):
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)


logging.Logger.warning_advice = warning_advice


@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    self.warning(*args, **kwargs)


logging.Logger.warning_once = warning_once


class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return empty function."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


class _tqdm_cls:
    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bars():
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()


def disable_progress_bars():
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
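# Minimal usage sketch for the helpers above (runnable standalone; inside the
# transformers library this module is normally reached as transformers.utils.logging):
if __name__ == "__main__":
    logger = get_logger()  # library root logger; the handler is attached lazily
    set_verbosity(INFO)
    logger.info("visible: root level is INFO")
    set_verbosity_error()
    logger.warning("suppressed: root level is now ERROR")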
| 5 | 0 |
'''simple docstring'''
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class UpperCAmelCase__ :
"""simple docstring"""
__UpperCAmelCase : Optional[str] = field(
default=__SCREAMING_SNAKE_CASE , metadata={
'''help''': (
'''The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'''
)
} , )
__UpperCAmelCase : Optional[str] = field(
default=__SCREAMING_SNAKE_CASE , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(__SCREAMING_SNAKE_CASE )} , )
__UpperCAmelCase : Optional[str] = field(
default=__SCREAMING_SNAKE_CASE , metadata={
'''help''': (
'''Override some existing default config settings when a model is trained from scratch. Example: '''
'''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
)
} , )
__UpperCAmelCase : Optional[str] = field(
default=__SCREAMING_SNAKE_CASE , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
__UpperCAmelCase : Optional[str] = field(
default=__SCREAMING_SNAKE_CASE , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
__UpperCAmelCase : Optional[str] = field(
default=__SCREAMING_SNAKE_CASE , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
__UpperCAmelCase : bool = field(
default=__SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
__UpperCAmelCase : str = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
__UpperCAmelCase : bool = field(
default=__SCREAMING_SNAKE_CASE , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
def __lowercase ( self : Any ):
'''simple docstring'''
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
'--config_overrides can\'t be used in combination with --config_name or --model_name_or_path' )
@dataclass
class UpperCAmelCase__ :
"""simple docstring"""
__UpperCAmelCase : Optional[str] = field(
default=__SCREAMING_SNAKE_CASE , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} )
__UpperCAmelCase : Optional[str] = field(
default=__SCREAMING_SNAKE_CASE , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
__UpperCAmelCase : Optional[str] = field(default=__SCREAMING_SNAKE_CASE , metadata={'''help''': '''The input training data file (a text file).'''} )
__UpperCAmelCase : Optional[str] = field(
default=__SCREAMING_SNAKE_CASE , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
__UpperCAmelCase : Optional[str] = field(
default=__SCREAMING_SNAKE_CASE , metadata={'''help''': '''An optional input train ref data file for whole word masking in Chinese.'''} , )
__UpperCAmelCase : Optional[str] = field(
default=__SCREAMING_SNAKE_CASE , metadata={'''help''': '''An optional input validation ref data file for whole word masking in Chinese.'''} , )
__UpperCAmelCase : bool = field(
default=__SCREAMING_SNAKE_CASE , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
__UpperCAmelCase : Optional[int] = field(
default=5 , metadata={
'''help''': '''The percentage of the train set used as validation set in case there\'s no validation split'''
} , )
__UpperCAmelCase : Optional[int] = field(
default=__SCREAMING_SNAKE_CASE , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated. Default to the max input length of the model.'''
)
} , )
__UpperCAmelCase : Optional[int] = field(
default=__SCREAMING_SNAKE_CASE , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
__UpperCAmelCase : float = field(
default=0.1_5 , metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} )
__UpperCAmelCase : bool = field(
default=__SCREAMING_SNAKE_CASE , metadata={
'''help''': (
'''Whether to pad all samples to `max_seq_length`. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
)
} , )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
if self.train_file is not None:
_a : int = self.train_file.split('.' )[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
_a : List[Any] = self.validation_file.split('.' )[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
def UpperCAmelCase_ ():
"""simple docstring"""
_a : Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_a, _a, _a : int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_a, _a, _a : Any = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
_a : str = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_a : Dict = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , lowerCAmelCase__ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
_a : Optional[Any] = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
if "validation" not in datasets.keys():
_a : int = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f"""train[:{data_args.validation_split_percentage}%]""" , )
_a : Any = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f"""train[{data_args.validation_split_percentage}%:]""" , )
else:
_a : str = {}
if data_args.train_file is not None:
_a : int = data_args.train_file
if data_args.validation_file is not None:
_a : Dict = data_args.validation_file
_a : List[Any] = data_args.train_file.split('.' )[-1]
if extension == "txt":
_a : Tuple = 'text'
_a : List[Any] = load_dataset(lowerCAmelCase__ , data_files=lowerCAmelCase__ )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_a : Dict = {
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.config_name:
_a : Dict = AutoConfig.from_pretrained(model_args.config_name , **lowerCAmelCase__ )
elif model_args.model_name_or_path:
_a : Optional[int] = AutoConfig.from_pretrained(model_args.model_name_or_path , **lowerCAmelCase__ )
else:
_a : Union[str, Any] = CONFIG_MAPPING[model_args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.config_overrides is not None:
logger.info(f"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(f"""New config: {config}""" )
_a : Any = {
'cache_dir': model_args.cache_dir,
'use_fast': model_args.use_fast_tokenizer,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
_a : int = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **lowerCAmelCase__ )
elif model_args.model_name_or_path:
_a : Optional[int] = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **lowerCAmelCase__ )
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
'You can do it from another script, save it, and load it from here, using --tokenizer_name.' )
if model_args.model_name_or_path:
_a : Union[str, Any] = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=lowerCAmelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('Training new model from scratch' )
_a : List[str] = AutoModelForMaskedLM.from_config(lowerCAmelCase__ )
model.resize_token_embeddings(len(lowerCAmelCase__ ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
_a : Tuple = datasets['train'].column_names
else:
_a : List[Any] = datasets['validation'].column_names
_a : Union[str, Any] = 'text' if 'text' in column_names else column_names[0]
_a : List[Any] = 'max_length' if data_args.pad_to_max_length else False
def tokenize_function(__a : Tuple ):
# Remove empty lines
_a : Dict = [line for line in examples['text'] if len(lowerCAmelCase__ ) > 0 and not line.isspace()]
return tokenizer(examples['text'] , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=data_args.max_seq_length )
_a : List[str] = datasets.map(
lowerCAmelCase__ , batched=lowerCAmelCase__ , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the chinese references if provided
if data_args.train_ref_file is not None:
_a : Tuple = add_chinese_references(tokenized_datasets['train'] , data_args.train_ref_file )
if data_args.validation_ref_file is not None:
_a : List[str] = add_chinese_references(
tokenized_datasets['validation'] , data_args.validation_ref_file )
# If we have ref files, need to avoid it removed by trainer
_a : Optional[int] = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
_a : List[str] = False
# Data collator
# This one will take care of randomly masking the tokens.
_a : int = DataCollatorForWholeWordMask(tokenizer=lowerCAmelCase__ , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
_a : Tuple = Trainer(
model=lowerCAmelCase__ , args=lowerCAmelCase__ , train_dataset=tokenized_datasets['train'] if training_args.do_train else None , eval_dataset=tokenized_datasets['validation'] if training_args.do_eval else None , tokenizer=lowerCAmelCase__ , data_collator=lowerCAmelCase__ , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
_a : Optional[int] = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
_a : Union[str, Any] = model_args.model_name_or_path
else:
_a : int = None
_a : Tuple = trainer.train(resume_from_checkpoint=lowerCAmelCase__ )
trainer.save_model() # Saves the tokenizer too for easy upload
_a : str = os.path.join(training_args.output_dir , 'train_results.txt' )
if trainer.is_world_process_zero():
with open(lowerCAmelCase__ , 'w' ) as writer:
logger.info('***** Train results *****' )
for key, value in sorted(train_result.metrics.items() ):
logger.info(f""" {key} = {value}""" )
writer.write(f"""{key} = {value}\n""" )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , 'trainer_state.json' ) )
# Evaluation
_a : Optional[Any] = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
_a : Dict = trainer.evaluate()
_a : str = math.exp(eval_output['eval_loss'] )
_a : str = perplexity
_a : List[str] = os.path.join(training_args.output_dir , 'eval_results_mlm_wwm.txt' )
if trainer.is_world_process_zero():
with open(lowerCAmelCase__ , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in sorted(results.items() ):
logger.info(f""" {key} = {value}""" )
writer.write(f"""{key} = {value}\n""" )
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 360 |
'''simple docstring'''
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    # Check that each equation is given as [a, b, c] for ax + by = c
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
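# Worked example for the function above, solving x + y = 3 and x - y = 1:
# determinant = -2, determinant_x = -4, determinant_y = -2, so x = 2.0 and y = 1.0.
if __name__ == "__main__":
    print(cramers_rule_2x2([1, 1, 3], [1, -1, 1]))  # -> (2.0, 1.0)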
| 5 | 0 |
'''simple docstring'''
from math import factorial
def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient
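# Worked check of the formula for the demo below: P(X = 2) with n = 4 trials and
# p = 0.75 is C(4, 2) * 0.75**2 * 0.25**2 = 6 * 0.5625 * 0.0625 = 0.2109375.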
if __name__ == "__main__":
from doctest import testmod
testmod()
print("""Probability of 2 successes out of 4 trails""")
print("""with probability of 0.75 is:""", end=""" """)
print(binomial_distribution(2, 4, 0.75))
| 361 |
'''simple docstring'''
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : int ,_a : List[str] ,_a : Optional[Any]=13 ,_a : str=30 ,_a : str=2 ,_a : Union[str, Any]=3 ,_a : Optional[Any]=True ,_a : int=True ,_a : Union[str, Any]=32 ,_a : List[Any]=5 ,_a : Union[str, Any]=4 ,_a : int=37 ,_a : Any="gelu" ,_a : Union[str, Any]=0.1 ,_a : str=0.1 ,_a : List[str]=10 ,_a : Dict=0.02 ,_a : Tuple=None ,):
'''simple docstring'''
_a : Any = parent
_a : int = batch_size
_a : List[Any] = image_size
_a : Optional[int] = patch_size
_a : List[str] = num_channels
_a : Dict = is_training
_a : Dict = use_labels
_a : Optional[Any] = hidden_size
_a : str = num_hidden_layers
_a : Optional[int] = num_attention_heads
_a : Dict = intermediate_size
_a : Union[str, Any] = hidden_act
_a : List[str] = hidden_dropout_prob
_a : Any = attention_probs_dropout_prob
_a : List[str] = type_sequence_label_size
_a : int = initializer_range
_a : List[Any] = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_a : Union[str, Any] = (image_size // patch_size) ** 2
_a : Tuple = num_patches + 1
def __lowercase ( self : Any ):
'''simple docstring'''
_a : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : str = None
if self.use_labels:
_a : Tuple = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_a : List[str] = self.get_config()
return config, pixel_values, labels
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
return ViTMSNConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,initializer_range=self.initializer_range ,)
def __lowercase ( self : Tuple ,_a : Any ,_a : List[Any] ,_a : int ):
'''simple docstring'''
_a : str = ViTMSNModel(config=_a )
model.to(_a )
model.eval()
_a : int = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def __lowercase ( self : List[Any] ,_a : str ,_a : Tuple ,_a : Dict ):
'''simple docstring'''
_a : Tuple = self.type_sequence_label_size
_a : int = ViTMSNForImageClassification(_a )
model.to(_a )
model.eval()
_a : Dict = model(_a ,labels=_a )
print('Pixel and labels shape: {pixel_values.shape}, {labels.shape}' )
print('Labels: {labels}' )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_a : int = 1
_a : Optional[Any] = ViTMSNForImageClassification(_a )
model.to(_a )
model.eval()
_a : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_a : Optional[int] = model(_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def __lowercase ( self : Any ):
'''simple docstring'''
_a : Optional[int] = self.prepare_config_and_inputs()
_a, _a, _a : int = config_and_inputs
_a : List[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Tuple = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
__UpperCAmelCase : List[Any] = (
{'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification}
if is_torch_available()
else {}
)
__UpperCAmelCase : str = False
__UpperCAmelCase : Optional[Any] = False
__UpperCAmelCase : List[str] = False
__UpperCAmelCase : int = False
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : List[str] = ViTMSNModelTester(self )
_a : Optional[int] = ConfigTester(self ,config_class=_a ,has_text_modality=_a ,hidden_size=37 )
def __lowercase ( self : str ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMSN does not use inputs_embeds' )
def __lowercase ( self : List[str] ):
'''simple docstring'''
pass
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a, _a : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : List[Any] = model_class(_a )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
_a : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a ,nn.Linear ) )
def __lowercase ( self : Any ):
'''simple docstring'''
_a, _a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : List[str] = model_class(_a )
_a : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : List[Any] = [*signature.parameters.keys()]
_a : int = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_a )
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def __lowercase ( self : int ):
'''simple docstring'''
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : Dict = ViTMSNModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-msn-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        torch.manual_seed(2)
        model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 5 | 0 |
'''simple docstring'''
def solution() -> int:
    """
    Returns the number of Sundays that fell on the first of the month during the
    twentieth century (1 Jan 1901 to 31 Dec 2000).
    """
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0

    while year < 2001:
        day += 7

        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]

        if month > 12:
            year += 1
            month = 1

        if year < 2001 and day == 1:
            sundays += 1
    return sundays
if __name__ == "__main__":
print(solution())
| 362 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
        print(f"{key}\n{value}\n")
| 5 | 0 |
'''simple docstring'''
from copy import deepcopy
class FenwickTree:
    """
    Fenwick Tree (Binary Indexed Tree): point updates and prefix sums in O(log n).
    """

    def __init__(self, arr=None, size=None):
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr):
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self):
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index):
        return index + (index & (-index))

    @staticmethod
    def prev(index):
        return index - (index & (-index))

    def add(self, index, value):
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index, value):
        self.add(index, value - self.get(index))

    def prefix(self, right):
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left, right):
        return self.prefix(right) - self.prefix(left)

    def get(self, index):
        return self.query(index, index + 1)

    def rank_query(self, value):
        value -= self.tree[0]
        if value < 0:
            return -1

        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2

        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
if __name__ == "__main__":
import doctest
doctest.testmod()
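    # Small end-to-end check of the class above: prefix(3) sums the first three
    # elements, and query(1, 4) sums indices 1..3 (half-open interval).
    f = FenwickTree([1, 2, 3, 4, 5])
    print(f.prefix(3))    # 1 + 2 + 3 = 6
    print(f.query(1, 4))  # 2 + 3 + 4 = 9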
| 363 |
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
__lowerCAmelCase = """docs/source/en/_toctree.yml"""
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
_a : Any = defaultdict(__a )
for doc in model_doc:
counts[doc["local"]] += 1
_a : List[str] = [key for key, value in counts.items() if value > 1]
_a : str = []
for duplicate_key in duplicates:
_a : Union[str, Any] = list({doc['title'] for doc in model_doc if doc['local'] == duplicate_key} )
if len(__a ) > 1:
raise ValueError(
f"""{duplicate_key} is present several times in the documentation table of content at """
'`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
'others.' )
# Only add this once
new_doc.append({'local': duplicate_key, 'title': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in model_doc if counts[doc['local']] == 1] )
# Sort
return sorted(__a , key=lambda __a : s["title"].lower() )
def UpperCAmelCase_ (__a : Optional[int]=False ):
"""simple docstring"""
with open(__a , encoding='utf-8' ) as f:
_a : Tuple = yaml.safe_load(f.read() )
# Get to the API doc
_a : Union[str, Any] = 0
while content[api_idx]["title"] != "API":
api_idx += 1
_a : Union[str, Any] = content[api_idx]['sections']
# Then to the model doc
_a : List[str] = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
_a : List[str] = api_doc[model_idx]['sections']
_a : List[Any] = [(idx, section) for idx, section in enumerate(__a ) if 'sections' in section]
_a : Tuple = False
for idx, modality_doc in modalities_docs:
_a : List[Any] = modality_doc['sections']
_a : Any = clean_model_doc_toc(__a )
if old_modality_doc != new_modality_doc:
_a : Union[str, Any] = True
if overwrite:
_a : str = new_modality_doc
if diff:
if overwrite:
_a : Dict = model_doc
_a : Dict = api_doc
with open(__a , 'w' , encoding='utf-8' ) as f:
f.write(yaml.dump(__a , allow_unicode=__a ) )
else:
raise ValueError(
'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
__lowerCAmelCase = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
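    # Typical invocations (the file name is illustrative; in the transformers repo a
    # check like this lives under utils/ and is wired into the repo's style targets):
    #   python check_doc_toc.py                      # fail if the ToC is unsorted
    #   python check_doc_toc.py --fix_and_overwrite  # rewrite _toctree.yml in place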
| 5 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_layoutlmv3": [
        "LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LayoutLMv3Config",
        "LayoutLMv3OnnxConfig",
    ],
    "processing_layoutlmv3": ["LayoutLMv3Processor"],
    "tokenization_layoutlmv3": ["LayoutLMv3Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv3_fast"] = ["LayoutLMv3TokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv3"] = [
        "LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv3ForQuestionAnswering",
        "LayoutLMv3ForSequenceClassification",
        "LayoutLMv3ForTokenClassification",
        "LayoutLMv3Model",
        "LayoutLMv3PreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_layoutlmv3"] = [
        "TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLayoutLMv3ForQuestionAnswering",
        "TFLayoutLMv3ForSequenceClassification",
        "TFLayoutLMv3ForTokenClassification",
        "TFLayoutLMv3Model",
        "TFLayoutLMv3PreTrainedModel",
    ]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv3"] = ["LayoutLMv3FeatureExtractor"]
    _import_structure["image_processing_layoutlmv3"] = ["LayoutLMv3ImageProcessor"]

if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 364 |
'''simple docstring'''
from collections.abc import Generator
from math import sin
def to_little_endian(string_32: bytes) -> bytes:
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")
    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    if i < 0:
        raise ValueError("Input must be non-negative")
    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])
    return bit_string


def get_block_words(bit_string: bytes):
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")
    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    if i < 0:
        raise ValueError("Input must be non-negative")
    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def md5_me(message: bytes) -> bytes:
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
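    # Sanity check against the well-known MD5 test vector for "hello world".
    assert md5_me(b"hello world") == b"5eb63bbbe01eeed093cb22bb8f5acdc3"
    print(md5_me(b"hello world"))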
| 5 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1]
| 365 |
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2
@register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        use_karras_sigmas: Optional[bool] = False,
        clip_sample: Optional[bool] = False,
        clip_sample_range: float = 1.0,
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="cosine")
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="exp")
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
        self.use_karras_sigmas = use_karras_sigmas
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()
    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(
        self,
        sample: torch.FloatTensor,
        timestep: Union[float, torch.FloatTensor],
    ) -> torch.FloatTensor:
        step_index = self.index_for_timestep(timestep)

        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, torch.device] = None,
        num_train_timesteps: Optional[int] = None,
    ):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        log_sigmas = np.log(sigmas)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)

        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
            timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])

        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])

        timesteps = torch.from_numpy(timesteps)
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])

        if str(device).startswith("mps"):
            # mps does not support float64
            self.timesteps = timesteps.to(device, dtype=torch.float32)
        else:
            self.timesteps = timesteps.to(device=device)

        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def _sigma_to_t(self, sigma, log_sigmas):
        # get log sigma
        log_sigma = np.log(sigma)

        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]

        # get sigmas range
        low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w, 0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape)
        return t
    def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps) -> torch.FloatTensor:
        """Constructs the noise schedule of Karras et al. (2022)."""
        sigma_min: float = in_sigmas[-1].item()
        sigma_max: float = in_sigmas[0].item()

        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0, 1, num_inference_steps)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas

    @property
    def state_in_first_order(self):
        return self.dt is None
    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `v_prediction`, or `sample`"
            )

        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2

            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample

            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
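# A minimal usage sketch (not in the original file): drive the scheduler above with a
# stub denoiser. `dummy_unet` is a stand-in for a trained UNet; everything else matches
# the class API defined in this module.
if __name__ == "__main__":
    scheduler = HeunDiscreteScheduler(num_train_timesteps=1000, beta_schedule="scaled_linear")
    scheduler.set_timesteps(num_inference_steps=25)
    dummy_unet = lambda x, t: torch.zeros_like(x)  # pretend noise prediction
    sample = torch.randn(1, 4, 64, 64) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        noise_pred = dummy_unet(model_input, t)
        sample = scheduler.step(noise_pred, t, sample).prev_sample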
| 5 | 0 |
'''simple docstring'''
import base64


def base64_encode(string: str) -> bytes:
    """Encode a UTF-8 string into base64-encoded bytes."""
    return base64.b64encode(string.encode("utf-8"))


def base64_decode(encoded_data: bytes) -> str:
    """Decode base64-encoded bytes back into a UTF-8 string."""
    return base64.b64decode(encoded_data).decode("utf-8")


if __name__ == "__main__":
    test = "Hello World!"
    encoded = base64_encode(test)
    print(encoded)
    decoded = base64_decode(encoded)
    print(decoded)
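    # A small follow-on sketch (not in the original file): the same round trip with the
    # URL-safe alphabet, handy when the encoded value is embedded in a URL or filename.
    url_token = base64.urlsafe_b64encode("path?query=1".encode("utf-8"))
    assert base64.urlsafe_b64decode(url_token).decode("utf-8") == "path?query=1"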
| 366 |
'''simple docstring'''
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> dict:
    """Measure a single qubit on the Aer simulator and return the counts."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(f'''Total count for various states are: {single_qubit_measure(1, 1)}''')
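# A hedged extension (not in the original file): preparing and measuring a Bell pair with
# the same legacy `qiskit.execute` API used above (note: `execute` is deprecated in
# Qiskit 1.0+, where `backend.run` is preferred).
def bell_state_measure(shots: int = 1000) -> dict:
    simulator = qiskit.Aer.get_backend("aer_simulator")
    circuit = qiskit.QuantumCircuit(2, 2)
    circuit.h(0)  # put qubit 0 into an equal superposition
    circuit.cx(0, 1)  # entangle qubit 1 with qubit 0
    circuit.measure([0, 1], [0, 1])
    job = qiskit.execute(circuit, simulator, shots=shots)
    return job.result().get_counts(circuit)  # expect roughly half '00' and half '11'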
| 5 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'susnato/ernie-m-base_pytorch': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json',
'susnato/ernie-m-large_pytorch': 'https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json',
}
class ErnieMConfig(PretrainedConfig):
    """Configuration class to store the configuration of an ErnieM model."""

    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size: int = 250002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
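# A brief usage sketch (not part of the original file; the path below is illustrative):
# configs round-trip through JSON, so a customized ErnieM config reloads losslessly.
if __name__ == "__main__":
    config = ErnieMConfig(hidden_size=512, num_hidden_layers=6)
    config.save_pretrained("./ernie-m-small")  # writes config.json
    reloaded = ErnieMConfig.from_pretrained("./ernie-m-small")
    assert reloaded.hidden_size == 512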
| 367 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
    def tearDown(self):
        # clean up memory after each test
        super().tearDown()
        gc.collect()
    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "xvjiarui/stable-diffusion-2-inpainting"
        pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        init_image = num_samples * [init_image]
        mask_image = num_samples * [mask_image]
        prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(prompt, init_image, mask_image)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        prompt_ids = shard(prompt_ids)
        processed_masked_images = shard(processed_masked_images)
        processed_masks = shard(processed_masks)

        output = pipeline(
            prompt_ids, processed_masks, processed_masked_images, params, prng_seed, num_inference_steps, jit=True
        )

        images = output.images.reshape(num_samples, 512, 512, 3)

        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 5 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int


def all_rotations(s: str) -> list[str]:
    """Return all cyclic rotations of the string s."""
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    return [s[i:] + s[:i] for i in range(len(s))]


def bwt_transform(s: str) -> BWTTransformDict:
    """Compute the Burrows-Wheeler transform of s."""
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")
    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetical order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response


def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    """Invert the Burrows-Wheeler transform."""
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError("The parameter idx_original_string type must be int or castable to int.")
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError("The parameter idx_original_string must be lower than len(bwt_string).")

    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
if __name__ == "__main__":
__lowerCAmelCase = '''Provide a string that I will generate its BWT transform: '''
__lowerCAmelCase = input(entry_msg).strip()
__lowerCAmelCase = bwt_transform(s)
print(
f'''Burrows Wheeler transform for string \'{s}\' results '''
f'''in \'{result['bwt_string']}\''''
)
__lowerCAmelCase = reverse_bwt(result["""bwt_string"""], result["""idx_original_string"""])
print(
f'''Reversing Burrows Wheeler transform for entry \'{result['bwt_string']}\' '''
f'''we get original string \'{original_string}\''''
)
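    # A worked example (computed by hand, not in the original file): the sorted rotations
    # of "banana" are abanan, anaban, ananab, banana, nabana, nanaba; their last column
    # reads "nnbaaa" and "banana" sits at sorted index 3.
    assert bwt_transform("banana") == {"bwt_string": "nnbaaa", "idx_original_string": 3}
    assert reverse_bwt("nnbaaa", 3) == "banana"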
| 368 |
'''simple docstring'''
def match_pattern(input_string: str, pattern: str) -> bool:
    """Regex-style matching where '.' matches any char and '*' repeats the previous element."""
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1

    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]

    # since string of zero length match pattern of zero length
    dp[0][0] = 1

    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0

    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0

    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0

    return bool(dp[-1][-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
__lowerCAmelCase = """aab"""
__lowerCAmelCase = """c*a*b"""
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(f'''{input_string} matches the given pattern {pattern}''')
else:
print(f'''{input_string} does not match with the given pattern {pattern}''')
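    # A quick cross-check sketch (not in the original file) against Python's built-in
    # regex engine; these toy patterns use only '.' and '*', which both engines support.
    import re

    for s, p in [("aab", "c*a*b"), ("ab", ".*"), ("aa", "a")]:
        assert match_pattern(s, p) == bool(re.fullmatch(p, s))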
| 5 | 0 |
'''simple docstring'''
def triangle_number_generator():
    """Yield the triangle numbers 1, 3, 6, 10, ..."""
    for n in range(1, 1_000_000):
        yield n * (n + 1) // 2


def count_divisors(n: int) -> int:
    """Count the divisors of n from its prime factorization."""
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count


def solution() -> int:
    """Project Euler 12: the first triangle number with more than 500 divisors."""
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)
if __name__ == "__main__":
print(solution())
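    # A hand-checked sanity sketch (not in the original file): 28 = 2**2 * 7, so it has
    # (2 + 1) * (1 + 1) = 6 divisors: 1, 2, 4, 7, 14, 28.
    assert count_divisors(28) == 6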
| 369 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]
        assert encoded[-1] == encoded_dot[0]
| 5 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCAmelCase__ ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Tuple = KandinskyImgaImgPipeline
__UpperCAmelCase : List[str] = ['''prompt''', '''image_embeds''', '''negative_image_embeds''', '''image''']
__UpperCAmelCase : Optional[Any] = [
'''prompt''',
'''negative_prompt''',
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
]
__UpperCAmelCase : Dict = [
'''generator''',
'''height''',
'''width''',
'''strength''',
'''guidance_scale''',
'''negative_prompt''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
__UpperCAmelCase : int = False
@property
def __lowercase ( self : Tuple ):
'''simple docstring'''
return 32
@property
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
return 32
@property
def __lowercase ( self : Tuple ):
'''simple docstring'''
return self.time_input_dim
@property
def __lowercase ( self : Any ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def __lowercase ( self : int ):
'''simple docstring'''
return 100
@property
def __lowercase ( self : Any ):
'''simple docstring'''
_a : Union[str, Any] = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' )
return tokenizer
@property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder
@property
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
_a : str = {
"""in_channels""": 4,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
_a : Optional[int] = UNetaDConditionModel(**lowercase_ )
return model
@property
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __lowercase ( self : str ):
'''simple docstring'''
torch.manual_seed(0 )
_a : Optional[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def __lowercase ( self : Dict ):
'''simple docstring'''
_a : str = self.dummy_text_encoder
_a : List[str] = self.dummy_tokenizer
_a : Union[str, Any] = self.dummy_unet
_a : str = self.dummy_movq
_a : Tuple = {
"""num_train_timesteps""": 1000,
"""beta_schedule""": """linear""",
"""beta_start""": 0.0_0085,
"""beta_end""": 0.012,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
_a : Tuple = DDIMScheduler(**lowercase_ )
_a : Any = {
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
def __lowercase ( self : Tuple ):
'''simple docstring'''
_a : str = """cpu"""
_a : Any = self.get_dummy_components()
_a : Optional[Any] = self.pipeline_class(**lowercase_ )
_a : int = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
_a : Union[str, Any] = pipe(**self.get_dummy_inputs(lowercase_ ) )
_a : int = output.images
_a : Optional[Any] = pipe(
**self.get_dummy_inputs(lowercase_ ) ,return_dict=lowercase_ ,)[0]
_a : Union[str, Any] = image[0, -3:, -3:, -1]
_a : List[str] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_a : str = np.array(
[0.6147_4943, 0.607_3539, 0.4330_8544, 0.592_8269, 0.4749_3595, 0.4675_5973, 0.461_3838, 0.4536_8797, 0.5011_9233] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self : Dict ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : List[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinsky/kandinsky_img2img_frog.npy' )
_a : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
_a : List[Any] = """A red cartoon frog, 4k"""
_a : Optional[Any] = KandinskyPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-prior' ,torch_dtype=torch.floataa )
pipe_prior.to(lowercase_ )
_a : Tuple = KandinskyImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1' ,torch_dtype=torch.floataa )
_a : Dict = pipeline.to(lowercase_ )
pipeline.set_progress_bar_config(disable=lowercase_ )
_a : List[Any] = torch.Generator(device='cpu' ).manual_seed(0 )
_a : int = pipe_prior(
lowercase_ ,generator=lowercase_ ,num_inference_steps=5 ,negative_prompt='' ,).to_tuple()
_a : Dict = pipeline(
lowercase_ ,image=lowercase_ ,image_embeds=lowercase_ ,negative_image_embeds=lowercase_ ,generator=lowercase_ ,num_inference_steps=100 ,height=768 ,width=768 ,strength=0.2 ,output_type='np' ,)
_a : Optional[Any] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowercase_ ,lowercase_ )
| 370 |
'''simple docstring'''
MORSE_CODE_DICT = {
"""A""": """.-""", """B""": """-...""", """C""": """-.-.""", """D""": """-..""", """E""": """.""", """F""": """..-.""", """G""": """--.""",
"""H""": """....""", """I""": """..""", """J""": """.---""", """K""": """-.-""", """L""": """.-..""", """M""": """--""", """N""": """-.""",
"""O""": """---""", """P""": """.--.""", """Q""": """--.-""", """R""": """.-.""", """S""": """...""", """T""": """-""", """U""": """..-""",
"""V""": """...-""", """W""": """.--""", """X""": """-..-""", """Y""": """-.--""", """Z""": """--..""", """1""": """.----""",
"""2""": """..---""", """3""": """...--""", """4""": """....-""", """5""": """.....""", """6""": """-....""", """7""": """--...""",
"""8""": """---..""", """9""": """----.""", """0""": """-----""", """&""": """.-...""", """@""": """.--.-.""",
""":""": """---...""", """,""": """--..--""", """.""": """.-.-.-""", """'""": """.----.""", """\"""": """.-..-.""",
"""?""": """..--..""", """/""": """-..-.""", """=""": """-...-""", """+""": """.-.-.""", """-""": """-....-""",
"""(""": """-.--.""", """)""": """-.--.-""", """!""": """-.-.--""", """ """: """/"""
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}


def encrypt(message: str) -> str:
    """Translate a plain-text message into Morse code."""
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    """Translate a Morse-code message back into plain text."""
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)


if __name__ == "__main__":
    main()
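    # A quick worked example (not in the original file): round-tripping "SOS".
    assert encrypt("SOS") == "... --- ..."
    assert decrypt("... --- ...") == "SOS"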
| 5 | 0 |
'''simple docstring'''
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
"""/attention/""": """/0/SelfAttention/""",
"""/self_attention/""": """/0/SelfAttention/""",
"""/encoder_decoder_attention/""": """/1/EncDecAttention/""",
"""value""": """v""",
"""query""": """q""",
"""key""": """k""",
"""out""": """o""",
"""pre_self_attention_layer_norm""": """0/layer_norm""",
"""pre_cross_attention_layer_norm""": """1/layer_norm""",
"""pre_attention_layer_norm""": """0/layer_norm""", # previously 1, but seems wrong
"""token_embedder""": """shared""",
"""encoder_norm""": """final_layer_norm""",
"""decoder_norm""": """final_layer_norm""",
"""relpos_bias/rel_embedding""": """block/0/layer/0/SelfAttention/relative_attention_bias/weight""",
"""router/router_weights/w/""": """router/classifier/""",
"""roer/roer_weights/w/""": """router/classifier/""",
"""logits_dense""": """lm_head""",
}
def rename_keys(s_dict):
    # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in the original model
    keys = list(s_dict.keys())
    for key in keys:
        layer_regex = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_regex, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", key)

        encoder_decoder_regex = r"(encoder|decoder)\/"
        if re.match(encoder_decoder_regex, key):
            groups = re.match(encoder_decoder_regex, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)

    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T

    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                s_dict[key.replace("expert/", f"experts/expert_{idx}/")] = expert_weights[idx]
                print(f"{key} -> {key.replace('expert/', f'experts/expert_{idx}/')}")
            s_dict.pop(key)

    return s_dict
GIN_TO_CONFIG_MAPPING = {
"""NUM_ENCODER_LAYERS""": """num_layers""",
"""NUM_DECODER_LAYERS""": """num_decoder_layers""",
"""NUM_HEADS""": """num_heads""",
"""HEAD_DIM""": """d_kv""",
"""EMBED_DIM""": """d_model""",
"""MLP_DIM""": """d_ff""",
"""NUM_SELECTED_EXPERTS""": """num_selected_experts""",
"""NUM_ENCODER_SPARSE_LAYERS""": """num_sparse_encoder_layers""",
"""NUM_DECODER_SPARSE_LAYERS""": """num_sparse_decoder_layers""",
"""dense.MlpBlock.activations""": """feed_forward_proj""",
}
def convert_gin_to_config(gin_file, num_experts):
    # Convert a google style config to the hugging face format
    import regex as re

    with open(gin_file, "r") as f:
        raw_gin = f.read()

    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)

    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])

    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config


def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8
):
    # Initialise PyTorch model
    print(f"Loading flax weights from : {flax_checkpoint_path}")
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)

    pt_model = SwitchTransformersForConditionalGeneration(config)

    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)

    print(f"Save PyTorch model to {pytorch_dump_path}")
    pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--switch_t5x_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"""
""" model architecture. If not provided, a `gin_file` has to be provided."""
),
)
parser.add_argument(
"""--gin_file""",
default=None,
type=str,
required=False,
help="""Path to the gin config file. If not provided, a `config_file` has to be passed """,
)
parser.add_argument(
"""--config_name""", default=None, type=str, required=False, help="""Config name of SwitchTransformers model."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output pytorch model."""
)
parser.add_argument("""--num_experts""", default=8, type=int, required=False, help="""Number of experts""")
    args = parser.parse_args()
    convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
        args.config_name,
        args.gin_file,
        args.pytorch_dump_folder_path,
        args.num_experts,
    )
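    # A hedged invocation sketch (not in the original script); the paths below are
    # placeholders and the block is disabled on purpose — substitute real T5X artifacts
    # before enabling it.
    if False:
        convert_flax_checkpoint_to_pytorch(
            "/path/to/t5x/checkpoint_500000",
            config_file=None,
            gin_file="/path/to/switch_base_8.gin",
            pytorch_dump_path="./switch-base-8-hf",
            num_experts=8,
        )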
| 371 |
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
"""return_dict""": False,
"""output_hidden_states""": True,
"""output_attentions""": True,
"""torchscript""": True,
"""torch_dtype""": """float16""",
"""use_bfloat16""": True,
"""tf_legacy_loss""": True,
"""pruned_heads""": {"""a""": 1},
"""tie_word_embeddings""": False,
"""is_decoder""": True,
"""cross_attention_hidden_size""": 1_2_8,
"""add_cross_attention""": True,
"""tie_encoder_decoder""": True,
"""max_length""": 5_0,
"""min_length""": 3,
"""do_sample""": True,
"""early_stopping""": True,
"""num_beams""": 3,
"""num_beam_groups""": 3,
"""diversity_penalty""": 0.5,
"""temperature""": 2.0,
"""top_k""": 1_0,
"""top_p""": 0.7,
"""typical_p""": 0.2,
"""repetition_penalty""": 0.8,
"""length_penalty""": 0.8,
"""no_repeat_ngram_size""": 5,
"""encoder_no_repeat_ngram_size""": 5,
"""bad_words_ids""": [1, 2, 3],
"""num_return_sequences""": 3,
"""chunk_size_feed_forward""": 5,
"""output_scores""": True,
"""return_dict_in_generate""": True,
"""forced_bos_token_id""": 2,
"""forced_eos_token_id""": 3,
"""remove_invalid_values""": True,
"""architectures""": ["""BertModel"""],
"""finetuning_task""": """translation""",
"""id2label""": {0: """label"""},
"""label2id""": {"""label""": """0"""},
"""tokenizer_class""": """BertTokenizerFast""",
"""prefix""": """prefix""",
"""bos_token_id""": 6,
"""pad_token_id""": 7,
"""eos_token_id""": 8,
"""sep_token_id""": 9,
"""decoder_start_token_id""": 1_0,
"""exponential_decay_length_penalty""": (5, 1.01),
"""suppress_tokens""": [0, 1],
"""begin_suppress_tokens""": 2,
"""task_specific_params""": {"""translation""": """some_params"""},
"""problem_type""": """regression""",
}
@is_staging_test
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def __lowercase ( cls : Optional[Any] ):
'''simple docstring'''
_a : List[Any] = TOKEN
HfFolder.save_token(_a )
@classmethod
def __lowercase ( cls : List[Any] ):
'''simple docstring'''
try:
delete_repo(token=cls._token ,repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='test-dynamic-config' )
except HTTPError:
pass
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : Any = BertConfig(
vocab_size=99 ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=37 )
config.push_to_hub('test-config' ,use_auth_token=self._token )
_a : Optional[Any] = BertConfig.from_pretrained(F"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a ,getattr(_a ,_a ) )
# Reset repo
delete_repo(token=self._token ,repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_a ,repo_id='test-config' ,push_to_hub=_a ,use_auth_token=self._token )
_a : Dict = BertConfig.from_pretrained(F"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a ,getattr(_a ,_a ) )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Tuple = BertConfig(
vocab_size=99 ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=37 )
config.push_to_hub('valid_org/test-config-org' ,use_auth_token=self._token )
_a : Union[str, Any] = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a ,getattr(_a ,_a ) )
# Reset repo
delete_repo(token=self._token ,repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
_a ,repo_id='valid_org/test-config-org' ,push_to_hub=_a ,use_auth_token=self._token )
_a : Tuple = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a ,getattr(_a ,_a ) )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
CustomConfig.register_for_auto_class()
_a : Optional[Any] = CustomConfig(attribute=42 )
config.push_to_hub('test-dynamic-config' ,use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map ,{'AutoConfig': 'custom_configuration.CustomConfig'} )
_a : int = AutoConfig.from_pretrained(F"""{USER}/test-dynamic-config""" ,trust_remote_code=_a )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ ,'CustomConfig' )
self.assertEqual(new_config.attribute ,42 )
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
    def test_config_from_string(self):
        c = GPT2Config()
        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + "foo"  # str
        c.update_from_string(
            f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}"
        )
        self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd")
        self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop")
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights")
        self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type")
    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"]
        )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                "The following keys are set with the default values in"
                " `test_configuration_common.config_common_kwargs` pick another value for them:"
                f" {', '.join(keys_with_defaults)}."
            )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
with self.assertRaises(_a ):
# config is in subfolder, the following should not work without specifying the subfolder
_a : List[Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
_a : List[str] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' ,subfolder='bert' )
self.assertIsNotNone(_a )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
_a : List[Any] = mock.Mock()
_a : Any = 500
_a : Any = {}
_a : Any = HTTPError
_a : List[Any] = {}
# Download this model to make sure it's in the cache.
_a : Any = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request' ,return_value=_a ) as mock_head:
_a : Optional[int] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This check we did call the fake head request
mock_head.assert_called()
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Optional[int] = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : int = AutoConfig.from_pretrained('bert-base-cased' )
_a : List[str] = ['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(_a )
_a : str = 2
json.dump(configuration.to_dict() ,open(os.path.join(_a ,'config.4.0.0.json' ) ,'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
_a : int = AutoConfig.from_pretrained(_a )
self.assertEqual(new_configuration.hidden_size ,2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
_a : Tuple = ['config.42.0.0.json']
_a : int = 768
configuration.save_pretrained(_a )
shutil.move(os.path.join(_a ,'config.4.0.0.json' ) ,os.path.join(_a ,'config.42.0.0.json' ) )
_a : int = AutoConfig.from_pretrained(_a )
self.assertEqual(new_configuration.hidden_size ,768 )
def __lowercase ( self : str ):
'''simple docstring'''
_a : Tuple = 'hf-internal-testing/test-two-configs'
import transformers as new_transformers
_a : Optional[int] = 'v4.0.0'
_a, _a : Tuple = new_transformers.models.auto.AutoConfig.from_pretrained(
_a ,return_unused_kwargs=_a )
self.assertEqual(new_configuration.hidden_size ,2 )
# This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(_a ,{} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
_a : str = 'v3.0.0'
_a : Optional[Any] = old_transformers.models.auto.AutoConfig.from_pretrained(_a )
self.assertEqual(old_configuration.hidden_size ,768 )
| 5 | 0 |
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
"""good first issue""",
"""feature request""",
"""wip""",
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state='closed' )
elif (
days_since_updated > 2_3
and days_since_creation >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
if __name__ == "__main__":
main()
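# A hedged, testable refactor sketch (not in the original script): the close/stale
# decision extracted as a pure function so it can be unit-tested without the GitHub API.
def staleness_action(bot_commented_last, days_since_updated, days_since_creation, exempt):
    if exempt or days_since_creation < 30:
        return "keep"
    if bot_commented_last and days_since_updated > 7:
        return "close"
    if days_since_updated > 23:
        return "mark_stale"
    return "keep"

assert staleness_action(True, 10, 40, exempt=False) == "close"
assert staleness_action(False, 30, 40, exempt=False) == "mark_stale"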
| 350 |
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
"""169M""": 1_2,
"""430M""": 2_4,
"""1B5""": 2_4,
"""3B""": 3_2,
"""7B""": 3_2,
"""14B""": 4_0,
}
HIDDEN_SIZE_MAPPING = {
"""169M""": 7_6_8,
"""430M""": 1_0_2_4,
"""1B5""": 2_0_4_8,
"""3B""": 2_5_6_0,
"""7B""": 4_0_9_6,
"""14B""": 5_1_2_0,
}
def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")
        if name != "head.weight":
            name = "rwkv." + name
        state_dict[name] = weight
    return state_dict
def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")

    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        "Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model."
    )
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--repo_id""", default=None, type=str, required=True, help="""Repo ID from which to pull the checkpoint."""
)
parser.add_argument(
"""--checkpoint_file""", default=None, type=str, required=True, help="""Name of the checkpoint file in the repo."""
)
parser.add_argument(
"""--output_dir""", default=None, type=str, required=True, help="""Where to save the converted model."""
)
parser.add_argument(
"""--tokenizer_file""",
default=None,
type=str,
help="""Path to the tokenizer file to use (if not provided, only the model is converted).""",
)
parser.add_argument(
"""--size""",
default=None,
type=str,
help="""Size of the model. Will be inferred from the `checkpoint_file` if not passed.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Push to the Hub the converted model.""",
)
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""Name of the pushed model on the Hub, including the username / organization.""",
)
    args = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 5 | 0 |
'''simple docstring'''
from collections import deque
class UpperCAmelCase__ :
"""simple docstring"""
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
class UpperCAmelCase__ :
"""simple docstring"""
    def __init__(
        self, number_of_queues: int, time_slices: list[int], queue: deque[Process], current_time: int
    ) -> None:
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()
    def calculate_sequence_of_finish_queue(self) -> list[str]:
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: deque[Process]) -> list[int]:
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: Process) -> int:
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time
    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
        finished: deque[Process] = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)
        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished
def __lowercase ( self : Tuple ,_a : Optional[int] ,_a : Union[str, Any] ):
'''simple docstring'''
_a : deque[Process] = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(a_ ) ):
_a : Tuple = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(a_ )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
_a : int = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(a_ )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
_a : Dict = 0
# set the finish time
_a : str = self.current_time
# update the process' turnaround time because it is finished
_a : int = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(a_ )
self.finish_queue.extend(a_ ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def __lowercase ( self : str ):
'''simple docstring'''
for i in range(self.number_of_queues - 1 ):
_a : int = self.round_robin(
self.ready_queue ,self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
__lowerCAmelCase = Process("""P1""", 0, 5_3)
__lowerCAmelCase = Process("""P2""", 0, 1_7)
__lowerCAmelCase = Process("""P3""", 0, 6_8)
__lowerCAmelCase = Process("""P4""", 0, 2_4)
__lowerCAmelCase = 3
__lowerCAmelCase = [1_7, 2_5]
__lowerCAmelCase = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={"""queue""": deque([Pa, Pa, Pa, Pa])})
__lowerCAmelCase = Process("""P1""", 0, 5_3)
__lowerCAmelCase = Process("""P2""", 0, 1_7)
__lowerCAmelCase = Process("""P3""", 0, 6_8)
__lowerCAmelCase = Process("""P4""", 0, 2_4)
__lowerCAmelCase = 3
__lowerCAmelCase = [1_7, 2_5]
__lowerCAmelCase = deque([Pa, Pa, Pa, Pa])
__lowerCAmelCase = MLFQ(number_of_queues, time_slices, queue, 0)
__lowerCAmelCase = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
f'''waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print completion times of processes(P1, P2, P3, P4)
print(
f'''completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
f'''turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print sequence of finished processes
print(
f'''sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}'''
)
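# A worked trace of the demo above, hand-computed with the same parameters
# (all arrivals at t=0; bursts 53/17/68/24; time slices [17, 25]):
#   round robin, q=17: P1 runs to t=17 (36 left), P2 finishes at t=34,
#                      P3 runs to t=51 (51 left), P4 runs to t=68 (7 left)
#   round robin, q=25: P1 runs to t=93 (11 left), P3 runs to t=118 (26 left),
#                      P4 finishes at t=125
#   FCFS:              P1 finishes at t=136, P3 finishes at t=162
#   waiting times    [P1, P2, P3, P4] -> [83, 17, 94, 101]
#   completion times [P1, P2, P3, P4] -> [136, 34, 162, 125]
#   finish sequence                   -> ['P2', 'P4', 'P1', 'P3']
# (turnaround equals completion here because every arrival time is 0)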
| 351 |
'''simple docstring'''
import comet # From: unbabel-comet
import torch
import datasets
__lowerCAmelCase = datasets.logging.get_logger(__name__)
__lowerCAmelCase = """\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel's Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = \"{COMET}: A Neural Framework for {MT} Evaluation\",
author = \"Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon\",
booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",
month = nov,
year = \"2020\",
address = \"Online\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",
pages = \"2685--2702\",
}
"""
__lowerCAmelCase = """\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).
With the release of the framework, the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task, achieving SOTA in that year's competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
"""
__lowerCAmelCase = """
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric('comet')
>>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use
>>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]
>>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]
>>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results[\"scores\"]])
[0.19, 0.92]
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase__ ( datasets.Metric ):
"""simple docstring"""
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,homepage='https://unbabel.github.io/COMET/html/index.html' ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'sources': datasets.Value('string' ,id='sequence' ),
'predictions': datasets.Value('string' ,id='sequence' ),
'references': datasets.Value('string' ,id='sequence' ),
} ) ,codebase_urls=['https://github.com/Unbabel/COMET'] ,reference_urls=[
'https://github.com/Unbabel/COMET',
'https://www.aclweb.org/anthology/2020.emnlp-main.213/',
        'http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf',
] ,)
    def __lowercase ( self : int ,_a : int ):
        '''simple docstring'''
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model('wmt20-comet-da' ) )
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name ) )
    def __lowercase ( self : Tuple ,sources ,predictions ,references ,gpus=None ,progress_bar=False ):
        '''simple docstring'''
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {'src': sources, 'mt': predictions, 'ref': references}
        data = [dict(zip(data ,t ) ) for t in zip(*data.values() )]
        scores, mean_score = self.scorer.predict(data ,gpus=gpus ,progress_bar=progress_bar )
        return {"mean_score": mean_score, "scores": scores}
| 5 | 0 |
'''simple docstring'''
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 352 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowerCAmelCase = {
"""configuration_squeezebert""": [
"""SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SqueezeBertConfig""",
"""SqueezeBertOnnxConfig""",
],
"""tokenization_squeezebert""": ["""SqueezeBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ["""SqueezeBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
"""SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SqueezeBertForMaskedLM""",
"""SqueezeBertForMultipleChoice""",
"""SqueezeBertForQuestionAnswering""",
"""SqueezeBertForSequenceClassification""",
"""SqueezeBertForTokenClassification""",
"""SqueezeBertModel""",
"""SqueezeBertModule""",
"""SqueezeBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 5 | 0 |
'''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
__lowerCAmelCase = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : str ,_a : List[str] ,_a : Any=16 ,_a : Any=13 ,_a : Any=7 ,_a : Optional[Any]=14 ,_a : Union[str, Any]=10 ,_a : Tuple=19 ,_a : Any=5 ,_a : Optional[int]=4 ,_a : int=True ,_a : List[str]=16 ,_a : Tuple=2 ,_a : Any=4 ,_a : Tuple=4 ,_a : Any="gelu" ,_a : Optional[Any]=0.1 ,_a : Tuple=0.1 ,_a : Optional[int]=[1, 2, 3, 4, 5] ,_a : List[Any]=25 ,_a : Union[str, Any]=5 ,):
'''simple docstring'''
_a : Optional[int] = d_model
_a : Optional[Any] = parent
_a : int = batch_size
_a : str = prediction_length
_a : str = context_length
_a : Optional[int] = cardinality
_a : List[Any] = num_time_features
_a : List[Any] = lags_sequence
_a : str = embedding_dimension
_a : int = is_training
_a : str = hidden_size
_a : int = num_hidden_layers
_a : int = num_attention_heads
_a : int = intermediate_size
_a : Optional[int] = hidden_act
_a : Optional[Any] = hidden_dropout_prob
_a : int = attention_probs_dropout_prob
_a : Dict = context_length
_a : int = prediction_length + label_length
_a : Optional[int] = label_length
_a : List[Any] = moving_average
_a : List[str] = autocorrelation_factor
def __lowercase ( self : List[Any] ):
'''simple docstring'''
return AutoformerConfig(
d_model=self.d_model ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,prediction_length=self.prediction_length ,context_length=self.context_length ,label_length=self.label_length ,lags_sequence=self.lags_sequence ,num_time_features=self.num_time_features ,num_static_categorical_features=1 ,cardinality=[self.cardinality] ,embedding_dimension=[self.embedding_dimension] ,moving_average=self.moving_average ,)
def __lowercase ( self : str ,_a : Optional[int] ):
'''simple docstring'''
_a : int = config.context_length + max(config.lags_sequence )
_a : Optional[int] = ids_tensor([self.batch_size, 1] ,config.cardinality[0] )
_a : Optional[Any] = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
_a : Any = floats_tensor([self.batch_size, _past_length] )
_a : Union[str, Any] = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
_a : Optional[int] = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
_a : List[str] = floats_tensor([self.batch_size, config.prediction_length] )
_a : Tuple = {
'past_values': past_values,
'static_categorical_features': static_categorical_features,
'past_time_features': past_time_features,
'past_observed_mask': past_observed_mask,
'future_time_features': future_time_features,
'future_values': future_values,
}
return inputs_dict
def __lowercase ( self : int ):
'''simple docstring'''
_a : Optional[Any] = self.get_config()
_a : Dict = self.prepare_autoformer_inputs_dict(__UpperCAmelCase )
return config, inputs_dict
def __lowercase ( self : str ):
'''simple docstring'''
_a, _a : Union[str, Any] = self.prepare_config_and_inputs()
return config, inputs_dict
def __lowercase ( self : Dict ,_a : Union[str, Any] ,_a : Tuple ):
'''simple docstring'''
_a : Dict = AutoformerModel(config=__UpperCAmelCase ).to(__UpperCAmelCase ).eval()
_a : List[str] = model(**__UpperCAmelCase )
_a : Dict = outputs.encoder_last_hidden_state
_a : int = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Tuple = model.get_encoder()
encoder.save_pretrained(__UpperCAmelCase )
_a : int = AutoformerEncoder.from_pretrained(__UpperCAmelCase ).to(__UpperCAmelCase )
_a, _a, _a, _a, _a : Optional[int] = model.create_network_inputs(**__UpperCAmelCase )
_a, _a : int = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
_a : Union[str, Any] = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) ,dim=-1 ,)
_a : str = encoder(inputs_embeds=__UpperCAmelCase )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
_a : Union[str, Any] = (
torch.mean(transformer_inputs[:, : config.context_length, ...] ,dim=1 )
.unsqueeze(1 )
.repeat(1 ,config.prediction_length ,1 )
)
_a : int = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] ,device=enc_input.device ,)
_a : Tuple = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) ,dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) ,dim=-1 ,)
_a : int = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) ,dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) ,dim=-1 ,)
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Any = model.get_decoder()
decoder.save_pretrained(__UpperCAmelCase )
_a : str = AutoformerDecoder.from_pretrained(__UpperCAmelCase ).to(__UpperCAmelCase )
_a : Union[str, Any] = decoder(
trend=__UpperCAmelCase ,inputs_embeds=__UpperCAmelCase ,encoder_hidden_states=__UpperCAmelCase ,)[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class UpperCAmelCase__ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : str = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
__UpperCAmelCase : List[Any] = (AutoformerForPrediction,) if is_torch_available() else ()
__UpperCAmelCase : Tuple = {'''feature-extraction''': AutoformerModel} if is_torch_available() else {}
__UpperCAmelCase : List[Any] = False
__UpperCAmelCase : str = False
__UpperCAmelCase : str = False
__UpperCAmelCase : Union[str, Any] = False
__UpperCAmelCase : Optional[Any] = False
__UpperCAmelCase : List[Any] = False
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Any = AutoformerModelTester(self )
_a : Tuple = ConfigTester(self ,config_class=__UpperCAmelCase ,has_text_modality=__UpperCAmelCase )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a, _a : int = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
_a : Union[str, Any] = model_class(__UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__UpperCAmelCase )
_a, _a : Optional[Any] = model_class.from_pretrained(__UpperCAmelCase ,output_loading_info=__UpperCAmelCase )
self.assertEqual(info['missing_keys'] ,[] )
def __lowercase ( self : int ):
'''simple docstring'''
_a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*__UpperCAmelCase )
@unittest.skip(reason='Model has no tokens embeddings' )
def __lowercase ( self : Any ):
'''simple docstring'''
pass
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : List[Any] = inspect.signature(getattr(__UpperCAmelCase ,'forward' ) )
# The main input is the name of the argument after `self`
_a : Optional[Any] = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name ,__UpperCAmelCase )
def __lowercase ( self : Dict ):
'''simple docstring'''
_a, _a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : str = model_class(__UpperCAmelCase )
_a : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : Tuple = [*signature.parameters.keys()]
_a : List[Any] = [
'past_values',
'past_time_features',
'past_observed_mask',
'static_categorical_features',
'static_real_features',
'future_values',
'future_time_features',
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append('future_observed_mask' )
expected_arg_names.extend(
[
'decoder_attention_mask',
'head_mask',
'decoder_head_mask',
'cross_attn_head_mask',
'encoder_outputs',
'past_key_values',
'output_hidden_states',
'output_attentions',
'use_cache',
'return_dict',
] )
self.assertListEqual(arg_names[: len(__UpperCAmelCase )] ,__UpperCAmelCase )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a, _a : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
_a : Any = True
_a : Optional[Any] = getattr(self.model_tester ,'seq_length' ,__UpperCAmelCase )
_a : List[str] = getattr(self.model_tester ,'decoder_seq_length' ,__UpperCAmelCase )
_a : Optional[int] = getattr(self.model_tester ,'encoder_seq_length' ,__UpperCAmelCase )
_a : str = getattr(self.model_tester ,'d_model' ,__UpperCAmelCase )
_a : str = getattr(self.model_tester ,'num_attention_heads' ,__UpperCAmelCase )
_a : Dict = d_model // num_attention_heads
for model_class in self.all_model_classes:
_a : Union[str, Any] = True
_a : Dict = False
_a : Tuple = True
_a : Tuple = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
with torch.no_grad():
_a : List[str] = model(**self._prepare_for_class(__UpperCAmelCase ,__UpperCAmelCase ) )
_a : Union[str, Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__UpperCAmelCase ) ,self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_a : Dict = True
_a : List[str] = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
with torch.no_grad():
_a : Any = model(**self._prepare_for_class(__UpperCAmelCase ,__UpperCAmelCase ) )
_a : Any = outputs.encoder_attentions
self.assertEqual(len(__UpperCAmelCase ) ,self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, encoder_seq_length, dim] ,)
_a : Optional[int] = len(__UpperCAmelCase )
_a : Dict = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(__UpperCAmelCase ,__UpperCAmelCase )
# decoder attentions
_a : Any = outputs.decoder_attentions
self.assertIsInstance(__UpperCAmelCase ,(list, tuple) )
self.assertEqual(len(__UpperCAmelCase ) ,self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, decoder_seq_length, dim] ,)
# cross attentions
_a : Union[str, Any] = outputs.cross_attentions
self.assertIsInstance(__UpperCAmelCase ,(list, tuple) )
self.assertEqual(len(__UpperCAmelCase ) ,self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, decoder_seq_length, dim] ,)
# Check attention is always last and order is fine
_a : Dict = True
_a : List[str] = True
_a : Any = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
with torch.no_grad():
_a : Tuple = model(**self._prepare_for_class(__UpperCAmelCase ,__UpperCAmelCase ) )
self.assertEqual(out_len + 2 ,len(__UpperCAmelCase ) )
_a : int = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__UpperCAmelCase ) ,self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, encoder_seq_length, dim] ,)
@is_flaky()
def __lowercase ( self : Any ):
'''simple docstring'''
super().test_retain_grad_hidden_states_attentions()
def prepare_batch (filename : str = "train-batch.pt" ):
    """simple docstring"""
    file = hf_hub_download(repo_id='hf-internal-testing/tourism-monthly-batch' , filename=filename , repo_type='dataset' )
    batch = torch.load(file , map_location=torch_device )
    return batch
@require_torch
@slow
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self : Any ):
'''simple docstring'''
_a : Tuple = AutoformerModel.from_pretrained('huggingface/autoformer-tourism-monthly' ).to(__UpperCAmelCase )
_a : Dict = prepare_batch()
with torch.no_grad():
_a : Dict = model(
past_values=batch['past_values'] ,past_time_features=batch['past_time_features'] ,past_observed_mask=batch['past_observed_mask'] ,static_categorical_features=batch['static_categorical_features'] ,future_values=batch['future_values'] ,future_time_features=batch['future_time_features'] ,)[0]
_a : Optional[int] = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape ,__UpperCAmelCase )
_a : Tuple = torch.tensor(
[[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] ,device=__UpperCAmelCase )
self.assertTrue(torch.allclose(output[0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : Tuple = AutoformerForPrediction.from_pretrained('huggingface/autoformer-tourism-monthly' ).to(__UpperCAmelCase )
_a : Tuple = prepare_batch('val-batch.pt' )
with torch.no_grad():
_a : int = model(
past_values=batch['past_values'] ,past_time_features=batch['past_time_features'] ,past_observed_mask=batch['past_observed_mask'] ,static_categorical_features=batch['static_categorical_features'] ,).encoder_last_hidden_state
_a : Optional[Any] = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape ,__UpperCAmelCase )
_a : List[Any] = torch.tensor(
[[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] ,device=__UpperCAmelCase )
self.assertTrue(torch.allclose(output[0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : Dict = AutoformerForPrediction.from_pretrained('huggingface/autoformer-tourism-monthly' ).to(__UpperCAmelCase )
_a : List[Any] = prepare_batch('val-batch.pt' )
with torch.no_grad():
_a : Tuple = model.generate(
static_categorical_features=batch['static_categorical_features'] ,past_time_features=batch['past_time_features'] ,past_values=batch['past_values'] ,future_time_features=batch['future_time_features'] ,past_observed_mask=batch['past_observed_mask'] ,)
_a : Tuple = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape ,__UpperCAmelCase )
_a : str = torch.tensor([3130.6763, 4056.5293, 7053.0786] ,device=__UpperCAmelCase )
_a : List[str] = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] ,__UpperCAmelCase ,rtol=1E-1 ) )
| 353 |
'''simple docstring'''
import sys


def matrix_chain_order(array):
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]
    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j):
    if i == j:
        print('A' + str(i), end=' ')
    else:
        print('(', end=' ')
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(')', end=' ')


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)
    print('No. of Operation required: ' + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)


if __name__ == "__main__":
    main()
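# The dimension array above is the classic CLRS example, so the expected
# output of main() is known and makes a handy sanity check:
#   No. of Operation required: 15125
#   ( ( A1 ( A2 A3 ) ) ( ( A4 A5 ) A6 ) )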
| 5 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase : List[str] = {
'''configuration_luke''': ['''LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LukeConfig'''],
'''tokenization_luke''': ['''LukeTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : List[Any] = [
'''LUKE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LukeForEntityClassification''',
'''LukeForEntityPairClassification''',
'''LukeForEntitySpanClassification''',
'''LukeForMultipleChoice''',
'''LukeForQuestionAnswering''',
'''LukeForSequenceClassification''',
'''LukeForTokenClassification''',
'''LukeForMaskedLM''',
'''LukeModel''',
'''LukePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
__lowerCAmelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 354 |
'''simple docstring'''
import os
import time

import pytest

from datasets.utils.filelock import FileLock, Timeout


def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / 'foo.lock'))
    lock2 = FileLock(str(tmpdir / 'foo.lock'))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
            assert time.time() - _start > timeout


def test_long_lock_filename(tmpdir):
    filename = 'a' * 1_0_0_0 + '.lock'
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith('.lock')
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 2_5_5
    lock2 = FileLock(tmpdir / filename)
    with lock2.acquire():
        with pytest.raises(Timeout):
            lock1.acquire(0)
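# For context, a minimal usage sketch of the lock under test (the path and the
# workflow are illustrative, assuming the same datasets FileLock API):
#   from datasets.utils.filelock import FileLock
#   with FileLock("build_cache.lock"):  # blocks until the lock is free
#       ...                             # critical section, one process at a time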
| 5 | 0 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : Tuple ,_a : Optional[Any] ,_a : Dict=13 ,_a : Dict=7 ,_a : str=True ,_a : str=True ,_a : List[str]=True ,_a : Dict=True ,_a : Any=99 ,_a : Dict=32 ,_a : Optional[Any]=2 ,_a : Optional[int]=4 ,_a : Optional[int]=37 ,_a : Any="gelu" ,_a : Any=0.1 ,_a : Tuple=0.1 ,_a : List[str]=512 ,_a : str=16 ,_a : Any=2 ,_a : Any=0.02 ,_a : List[Any]=3 ,_a : Dict=4 ,_a : Tuple=None ,):
'''simple docstring'''
_a : Any = parent
_a : Tuple = 13
_a : List[str] = 7
_a : List[str] = True
_a : List[str] = True
_a : Dict = True
_a : Any = True
_a : str = 99
_a : List[Any] = 384
_a : Any = 2
_a : Union[str, Any] = 4
_a : Optional[int] = 37
_a : str = """gelu"""
_a : List[Any] = 0.1
_a : Union[str, Any] = 0.1
_a : int = 512
_a : str = 16
_a : int = 2
_a : List[str] = 0.02
_a : List[str] = 3
_a : List[str] = 4
_a : Union[str, Any] = 128
_a : int = 2
_a : List[Any] = 9
_a : Tuple = 1
_a : Any = None
def __lowercase ( self : str ):
'''simple docstring'''
_a : str = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
_a : Optional[Any] = None
if self.use_input_mask:
_a : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
_a : str = None
if self.use_token_type_ids:
_a : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
_a : str = None
_a : List[Any] = None
_a : Optional[int] = None
if self.use_labels:
_a : List[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_a : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
_a : List[Any] = ids_tensor([self.batch_size] ,self.num_choices )
_a : List[Any] = ConvBertConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,return_dict=_a ,)
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowercase ( self : str ,_a : str ,_a : Optional[int] ,_a : List[Any] ,_a : Union[str, Any] ,_a : Any ,_a : Union[str, Any] ,_a : Tuple ):
'''simple docstring'''
_a : int = TFConvBertModel(config=_a )
_a : Any = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_a : List[str] = [input_ids, input_mask]
_a : Any = model(_a )
_a : Any = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def __lowercase ( self : Union[str, Any] ,_a : str ,_a : List[str] ,_a : Dict ,_a : Union[str, Any] ,_a : Union[str, Any] ,_a : Any ,_a : Dict ):
'''simple docstring'''
_a : List[str] = TFConvBertForMaskedLM(config=_a )
_a : Union[str, Any] = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_a : str = model(_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def __lowercase ( self : Dict ,_a : str ,_a : Optional[Any] ,_a : Tuple ,_a : int ,_a : Any ,_a : str ,_a : List[str] ):
'''simple docstring'''
_a : Any = self.num_labels
_a : List[str] = TFConvBertForSequenceClassification(config=_a )
_a : int = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_a : Optional[int] = model(_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def __lowercase ( self : Tuple ,_a : List[str] ,_a : Dict ,_a : List[str] ,_a : Dict ,_a : Optional[Any] ,_a : Dict ,_a : List[str] ):
'''simple docstring'''
_a : Optional[Any] = self.num_choices
_a : List[Any] = TFConvBertForMultipleChoice(config=_a )
_a : Tuple = tf.tile(tf.expand_dims(_a ,1 ) ,(1, self.num_choices, 1) )
_a : Optional[int] = tf.tile(tf.expand_dims(_a ,1 ) ,(1, self.num_choices, 1) )
_a : List[Any] = tf.tile(tf.expand_dims(_a ,1 ) ,(1, self.num_choices, 1) )
_a : Optional[int] = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
_a : int = model(_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def __lowercase ( self : Optional[int] ,_a : Any ,_a : str ,_a : List[Any] ,_a : Any ,_a : int ,_a : Optional[Any] ,_a : Any ):
'''simple docstring'''
_a : List[Any] = self.num_labels
_a : Optional[int] = TFConvBertForTokenClassification(config=_a )
_a : int = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_a : List[str] = model(_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def __lowercase ( self : Union[str, Any] ,_a : Optional[Any] ,_a : Tuple ,_a : Dict ,_a : Union[str, Any] ,_a : Optional[Any] ,_a : int ,_a : str ):
'''simple docstring'''
_a : Optional[Any] = TFConvBertForQuestionAnswering(config=_a )
_a : Optional[Any] = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_a : str = model(_a )
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def __lowercase ( self : str ):
'''simple docstring'''
_a : List[str] = self.prepare_config_and_inputs()
(
_a
) : Optional[int] = config_and_inputs
_a : List[Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase__ ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Tuple = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
__UpperCAmelCase : List[str] = (
{
"""feature-extraction""": TFConvBertModel,
"""fill-mask""": TFConvBertForMaskedLM,
"""question-answering""": TFConvBertForQuestionAnswering,
"""text-classification""": TFConvBertForSequenceClassification,
"""token-classification""": TFConvBertForTokenClassification,
"""zero-shot""": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
__UpperCAmelCase : Optional[Any] = False
__UpperCAmelCase : Any = False
__UpperCAmelCase : Optional[int] = False
def __lowercase ( self : Any ):
'''simple docstring'''
_a : List[str] = TFConvBertModelTester(self )
_a : str = ConfigTester(self ,config_class=_a ,hidden_size=37 )
def __lowercase ( self : int ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
_a : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_a )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_a )
def __lowercase ( self : int ):
'''simple docstring'''
_a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_a )
def __lowercase ( self : Dict ):
'''simple docstring'''
_a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_a )
def __lowercase ( self : str ):
'''simple docstring'''
_a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_a )
@slow
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_a : List[str] = True
_a : List[str] = True
if hasattr(_a ,'use_cache' ):
_a : str = True
_a : int = getattr(self.model_tester ,'encoder_seq_length' ,self.model_tester.seq_length )
_a : Optional[Any] = getattr(self.model_tester ,'key_length' ,_a )
for model_class in self.all_model_classes:
_a : int = self._prepare_for_class(_a ,_a )
_a : Dict = model_class(_a )
_a : List[Any] = len(model(_a ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_a ,saved_model=_a )
_a : Union[str, Any] = os.path.join(_a ,'saved_model' ,'1' )
_a : Dict = tf.keras.models.load_model(_a )
_a : Any = model(_a )
if self.is_encoder_decoder:
_a : str = outputs["""encoder_hidden_states"""]
_a : List[str] = outputs["""encoder_attentions"""]
else:
_a : Union[str, Any] = outputs["""hidden_states"""]
_a : Optional[int] = outputs["""attentions"""]
self.assertEqual(len(_a ) ,_a )
_a : Dict = getattr(
self.model_tester ,'expected_num_hidden_layers' ,self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_a ) ,_a )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) ,[self.model_tester.seq_length, self.model_tester.hidden_size] ,)
self.assertEqual(len(_a ) ,self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] ,)
@slow
def __lowercase ( self : int ):
'''simple docstring'''
_a : str = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
self.assertIsNotNone(_a )
def __lowercase ( self : int ):
'''simple docstring'''
_a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
_a : Any = True
_a : Optional[int] = getattr(self.model_tester ,'decoder_seq_length' ,self.model_tester.seq_length )
_a : List[str] = getattr(self.model_tester ,'encoder_seq_length' ,self.model_tester.seq_length )
_a : Any = getattr(self.model_tester ,'key_length' ,_a )
_a : Any = getattr(self.model_tester ,'key_length' ,_a )
def check_decoder_attentions_output(_a : Optional[Any] ):
_a : Optional[Any] = len(_a )
self.assertEqual(out_len % 2 ,0 )
_a : int = outputs.decoder_attentions
self.assertEqual(len(_a ) ,self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] ,)
def check_encoder_attentions_output(_a : Optional[int] ):
_a : Tuple = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(_a ) ,self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] ,)
for model_class in self.all_model_classes:
_a : Optional[int] = True
_a : List[str] = False
_a : Union[str, Any] = model_class(_a )
_a : Any = model(self._prepare_for_class(_a ,_a ) )
_a : List[str] = len(_a )
self.assertEqual(config.output_hidden_states ,_a )
check_encoder_attentions_output(_a )
if self.is_encoder_decoder:
_a : Dict = model_class(_a )
_a : Dict = model(self._prepare_for_class(_a ,_a ) )
self.assertEqual(config.output_hidden_states ,_a )
check_decoder_attentions_output(_a )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_a : Dict = True
_a : List[Any] = model_class(_a )
_a : Any = model(self._prepare_for_class(_a ,_a ) )
self.assertEqual(config.output_hidden_states ,_a )
check_encoder_attentions_output(_a )
# Check attention is always last and order is fine
_a : Tuple = True
_a : Tuple = True
_a : Optional[Any] = model_class(_a )
_a : Any = model(self._prepare_for_class(_a ,_a ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) ,len(_a ) )
self.assertEqual(model.config.output_hidden_states ,_a )
check_encoder_attentions_output(_a )
@require_tf
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __lowercase ( self : List[Any] ):
'''simple docstring'''
_a : List[str] = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
_a : Optional[int] = tf.constant([[0, 1, 2, 3, 4, 5]] )
_a : Optional[int] = model(_a )[0]
_a : str = [1, 6, 768]
self.assertEqual(output.shape ,_a )
_a : Optional[Any] = tf.constant(
[
[
[-0.0347_5493, -0.468_6034, -0.3063_8832],
[0.2263_7248, -0.2698_8646, -0.742_3424],
[0.1032_4868, -0.4501_3508, -0.5828_0784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] ,_a ,atol=1E-4 )
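        # Note on the expected attention shapes in the tests above: ConvBert
        # (head_ratio=2 by default in ConvBertConfig) replaces half of the
        # self-attention heads with span-based dynamic convolution heads, so
        # e.g. num_attention_heads=4 yields attention tensors of shape
        # [batch, 4 / 2 = 2, seq_length, key_length].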
| 355 |
'''simple docstring'''
def solution(min_total: int = 1_0**1_2) -> int:
    prev_numerator = 1
    prev_denominator = 0
    numerator = 1
    denominator = 1
    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator
        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator
    return (denominator + 1) // 2


if __name__ == "__main__":
    print(f'''{solution() = }''')
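# Why this works (it appears to implement Project Euler 100): with b blue
# discs out of n total, P(two blue) = b/n * (b-1)/(n-1) = 1/2. Substituting
# x = 2n - 1 and y = 2b - 1 turns that into the negative Pell equation
#   x^2 - 2*y^2 = -1,
# whose successive solutions follow (x, y) -> (3x + 4y, 2x + 3y):
#   (1, 1), (7, 5), (41, 29), ...   e.g. (41, 29) gives b = 15, n = 21.
# Here numerator = x and denominator = y, so the loop stops once
# n = (x + 1) / 2 exceeds min_total and returns b = (y + 1) // 2.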
| 5 | 0 |
'''simple docstring'''
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
__lowerCAmelCase = logging.get_logger(__name__)
class UpperCAmelCase__ ( ProcessorMixin ):
"""simple docstring"""
__UpperCAmelCase : str = 'AutoTokenizer'
__UpperCAmelCase : List[Any] = ['tokenizer']
__UpperCAmelCase : str = {
'semantic_prompt': 1,
'coarse_prompt': 2,
'fine_prompt': 2,
}
def __init__( self : int ,_a : Optional[Any] ,_a : Optional[int]=None ):
'''simple docstring'''
super().__init__(UpperCamelCase_ )
_a : Optional[Any] = speaker_embeddings
@classmethod
def __lowercase ( cls : str ,_a : str ,_a : List[str]="speaker_embeddings_path.json" ,**_a : int ):
'''simple docstring'''
if speaker_embeddings_dict_path is not None:
_a : Tuple = get_file_from_repo(
UpperCamelCase_ ,UpperCamelCase_ ,subfolder=kwargs.pop('subfolder' ,UpperCamelCase_ ) ,cache_dir=kwargs.pop('cache_dir' ,UpperCamelCase_ ) ,force_download=kwargs.pop('force_download' ,UpperCamelCase_ ) ,proxies=kwargs.pop('proxies' ,UpperCamelCase_ ) ,resume_download=kwargs.pop('resume_download' ,UpperCamelCase_ ) ,local_files_only=kwargs.pop('local_files_only' ,UpperCamelCase_ ) ,use_auth_token=kwargs.pop('use_auth_token' ,UpperCamelCase_ ) ,revision=kwargs.pop('revision' ,UpperCamelCase_ ) ,)
if speaker_embeddings_path is None:
logger.warning(
F"""`{os.path.join(UpperCamelCase_ ,UpperCamelCase_ )}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.""" )
_a : Any = None
else:
with open(UpperCamelCase_ ) as speaker_embeddings_json:
_a : Union[str, Any] = json.load(UpperCamelCase_ )
else:
_a : Dict = None
_a : int = AutoTokenizer.from_pretrained(UpperCamelCase_ ,**UpperCamelCase_ )
return cls(tokenizer=UpperCamelCase_ ,speaker_embeddings=UpperCamelCase_ )
def __lowercase ( self : str ,_a : Optional[int] ,_a : List[str]="speaker_embeddings_path.json" ,_a : Optional[int]="speaker_embeddings" ,_a : bool = False ,**_a : List[Any] ,):
'''simple docstring'''
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(UpperCamelCase_ ,UpperCamelCase_ ,'v2' ) ,exist_ok=UpperCamelCase_ )
_a : List[Any] = {}
_a : Tuple = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
_a : List[str] = self._load_voice_preset(UpperCamelCase_ )
_a : Optional[int] = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict['repo_or_path'] ,UpperCamelCase_ ,F"""{prompt_key}_{key}""" ) ,voice_preset[key] ,allow_pickle=UpperCamelCase_ ,)
_a : Dict = os.path.join(UpperCamelCase_ ,F"""{prompt_key}_{key}.npy""" )
_a : Dict = tmp_dict
with open(os.path.join(UpperCamelCase_ ,UpperCamelCase_ ) ,'w' ) as fp:
json.dump(UpperCamelCase_ ,UpperCamelCase_ )
super().save_pretrained(UpperCamelCase_ ,UpperCamelCase_ ,**UpperCamelCase_ )
def __lowercase ( self : int ,_a : str = None ,**_a : Tuple ):
'''simple docstring'''
_a : Optional[int] = self.speaker_embeddings[voice_preset]
_a : Dict = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
F"""Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].""" )
_a : List[str] = get_file_from_repo(
self.speaker_embeddings.get('repo_or_path' ,'/' ) ,voice_preset_paths[key] ,subfolder=kwargs.pop('subfolder' ,UpperCamelCase_ ) ,cache_dir=kwargs.pop('cache_dir' ,UpperCamelCase_ ) ,force_download=kwargs.pop('force_download' ,UpperCamelCase_ ) ,proxies=kwargs.pop('proxies' ,UpperCamelCase_ ) ,resume_download=kwargs.pop('resume_download' ,UpperCamelCase_ ) ,local_files_only=kwargs.pop('local_files_only' ,UpperCamelCase_ ) ,use_auth_token=kwargs.pop('use_auth_token' ,UpperCamelCase_ ) ,revision=kwargs.pop('revision' ,UpperCamelCase_ ) ,)
if path is None:
raise ValueError(
F"""`{os.path.join(self.speaker_embeddings.get('repo_or_path' ,'/' ) ,voice_preset_paths[key] )}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings.""" )
_a : str = np.load(UpperCamelCase_ )
return voice_preset_dict
def __lowercase ( self : Tuple ,_a : Optional[dict] = None ):
'''simple docstring'''
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(F"""Voice preset unrecognized, missing {key} as a key.""" )
if not isinstance(voice_preset[key] ,np.ndarray ):
raise ValueError(F"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(F"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" )
def __call__( self : Tuple ,_a : Tuple=None ,_a : int=None ,_a : Optional[int]="pt" ,_a : Optional[int]=256 ,_a : Dict=False ,_a : str=True ,_a : str=False ,**_a : List[Any] ,):
'''simple docstring'''
if voice_preset is not None and not isinstance(UpperCamelCase_ ,UpperCamelCase_ ):
if (
isinstance(UpperCamelCase_ ,UpperCamelCase_ )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
_a : List[Any] = self._load_voice_preset(UpperCamelCase_ )
else:
if isinstance(UpperCamelCase_ ,UpperCamelCase_ ) and not voice_preset.endswith('.npz' ):
_a : Dict = voice_preset + '.npz'
_a : Union[str, Any] = np.load(UpperCamelCase_ )
if voice_preset is not None:
self._validate_voice_preset_dict(UpperCamelCase_ ,**UpperCamelCase_ )
_a : Optional[int] = BatchFeature(data=UpperCamelCase_ ,tensor_type=UpperCamelCase_ )
_a : Union[str, Any] = self.tokenizer(
UpperCamelCase_ ,return_tensors=UpperCamelCase_ ,padding='max_length' ,max_length=UpperCamelCase_ ,return_attention_mask=UpperCamelCase_ ,return_token_type_ids=UpperCamelCase_ ,add_special_tokens=UpperCamelCase_ ,**UpperCamelCase_ ,)
if voice_preset is not None:
_a : Tuple = voice_preset
return encoded_text
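        # A short usage sketch, following the transformers Bark documentation
        # (checkpoint and voice preset names as documented there):
        #   processor = BarkProcessor.from_pretrained("suno/bark-small")
        #   inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")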
| 356 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
__lowerCAmelCase = {
"""vocab_file""": {"""mobilebert-uncased""": """https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"""},
"""tokenizer_file""": {
"""mobilebert-uncased""": """https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"""
},
}
__lowerCAmelCase = {"""mobilebert-uncased""": 5_1_2}
__lowerCAmelCase = {}
class UpperCAmelCase__ ( PreTrainedTokenizerFast ):
"""simple docstring"""
__UpperCAmelCase : Any = VOCAB_FILES_NAMES
__UpperCAmelCase : int = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Tuple = PRETRAINED_INIT_CONFIGURATION
__UpperCAmelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : Optional[Any] = MobileBertTokenizer
def __init__( self : Dict ,_a : List[Any]=None ,_a : Optional[Any]=None ,_a : Union[str, Any]=True ,_a : Dict="[UNK]" ,_a : Union[str, Any]="[SEP]" ,_a : Any="[PAD]" ,_a : Optional[int]="[CLS]" ,_a : Optional[Any]="[MASK]" ,_a : Dict=True ,_a : Any=None ,**_a : Optional[Any] ,):
'''simple docstring'''
super().__init__(
_a ,tokenizer_file=_a ,do_lower_case=_a ,unk_token=_a ,sep_token=_a ,pad_token=_a ,cls_token=_a ,mask_token=_a ,tokenize_chinese_chars=_a ,strip_accents=_a ,**_a ,)
_a : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' ,_a ) != do_lower_case
or normalizer_state.get('strip_accents' ,_a ) != strip_accents
or normalizer_state.get('handle_chinese_chars' ,_a ) != tokenize_chinese_chars
):
_a : Optional[Any] = getattr(_a ,normalizer_state.pop('type' ) )
_a : Dict = do_lower_case
_a : str = strip_accents
_a : Tuple = tokenize_chinese_chars
_a : Optional[Any] = normalizer_class(**_a )
_a : str = do_lower_case
def __lowercase ( self : Tuple ,_a : Union[str, Any] ,_a : List[str]=None ):
'''simple docstring'''
_a : Tuple = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __lowercase ( self : List[str] ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
_a : List[str] = [self.sep_token_id]
_a : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowercase ( self : Union[str, Any] ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
_a : int = self._tokenizer.model.save(_a ,name=_a )
return tuple(_a )
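        # Minimal usage sketch, using the hub checkpoint behind the vocab map
        # above (the short map key corresponds to google/mobilebert-uncased):
        #   tokenizer = MobileBertTokenizerFast.from_pretrained("google/mobilebert-uncased")
        #   tokenizer("hello world")["input_ids"]  # [CLS] hello world [SEP] as ids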
| 5 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
"""huggingface/time-series-transformer-tourism-monthly""": (
"""https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"""
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class UpperCAmelCase__ ( PretrainedConfig ):
"""simple docstring"""
__UpperCAmelCase : Dict = 'time_series_transformer'
__UpperCAmelCase : Optional[Any] = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
'num_hidden_layers': 'encoder_layers',
}
def __init__( self : Optional[Any] ,_a : Optional[int] = None ,_a : Optional[int] = None ,_a : str = "student_t" ,_a : str = "nll" ,_a : int = 1 ,_a : List[int] = [1, 2, 3, 4, 5, 6, 7] ,_a : Optional[Union[str, bool]] = "mean" ,_a : int = 0 ,_a : int = 0 ,_a : int = 0 ,_a : int = 0 ,_a : Optional[List[int]] = None ,_a : Optional[List[int]] = None ,_a : int = 32 ,_a : int = 32 ,_a : int = 2 ,_a : int = 2 ,_a : int = 2 ,_a : int = 2 ,_a : bool = True ,_a : str = "gelu" ,_a : int = 64 ,_a : float = 0.1 ,_a : float = 0.1 ,_a : float = 0.1 ,_a : float = 0.1 ,_a : float = 0.1 ,_a : int = 100 ,_a : float = 0.02 ,_a : int=True ,**_a : List[str] ,):
'''simple docstring'''
_a : List[Any] = prediction_length
_a : Tuple = context_length or prediction_length
_a : Any = distribution_output
_a : Any = loss
_a : str = input_size
_a : Dict = num_time_features
_a : Dict = lags_sequence
_a : List[str] = scaling
_a : List[str] = num_dynamic_real_features
_a : Union[str, Any] = num_static_real_features
_a : Dict = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(__lowerCAmelCase ) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`' )
_a : Optional[Any] = cardinality
else:
_a : Optional[int] = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(__lowerCAmelCase ) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
_a : int = embedding_dimension
else:
_a : List[Any] = [min(50 ,(cat + 1) // 2 ) for cat in self.cardinality]
_a : str = num_parallel_samples
# Transformer architecture configuration
_a : Optional[int] = input_size * len(__lowerCAmelCase ) + self._number_of_features
_a : Optional[Any] = d_model
_a : str = encoder_attention_heads
_a : Tuple = decoder_attention_heads
_a : Any = encoder_ffn_dim
_a : Optional[Any] = decoder_ffn_dim
_a : List[Any] = encoder_layers
_a : Any = decoder_layers
_a : List[str] = dropout
_a : Union[str, Any] = attention_dropout
_a : int = activation_dropout
_a : Dict = encoder_layerdrop
_a : List[str] = decoder_layerdrop
_a : List[str] = activation_function
_a : Dict = init_std
_a : Optional[Any] = use_cache
super().__init__(is_encoder_decoder=__lowerCAmelCase ,**__lowerCAmelCase )
@property
def __lowercase ( self : List[Any] ):
'''simple docstring'''
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
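        # A quick check of the feature bookkeeping above, using the constructor
        # defaults (input_size=1, lags_sequence=[1..7], no time/static/dynamic
        # features):
        #   _number_of_features = 0 + 0 + 0 + 0 + 1 * 2 = 2
        #   feature_size = input_size * len(lags_sequence) + _number_of_features
        #                = 1 * 7 + 2 = 9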
| 357 |
'''simple docstring'''
def palindromic_string(input_string: str) -> str:
    max_length = 0
    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ''
    output_string = ''
    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]
    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741
    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]
    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1
        length[j] = 2 * k - 1
        # does this palindrome end after the previously explored end (that is, r)?
        # if yes, update r to the last index of this palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1
        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j
    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i
    return output_string


if __name__ == "__main__":
    import doctest
    doctest.testmod()
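# Two illustrative calls, hand-checked against the algorithm above:
#   palindromic_string("abacaba")  -> "abacaba" (the whole string is a palindrome)
#   palindromic_string("cbbd")     -> "bb" (the "|" interleaving also catches
#                                    even-length palindromes)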
| 5 | 0 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True )
class UpperCAmelCase__ ( TaskTemplate ):
"""simple docstring"""
__UpperCAmelCase : str = field(default='''audio-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
__UpperCAmelCase : ClassVar[Features] = Features({'''audio''': Audio()} )
__UpperCAmelCase : ClassVar[Features] = Features({'''labels''': ClassLabel} )
__UpperCAmelCase : str = "audio"
__UpperCAmelCase : str = "labels"
def __lowercase ( self : str ,_a : Union[str, Any] ):
'''simple docstring'''
if self.label_column not in features:
raise ValueError(F"""Column {self.label_column} is not present in features.""" )
if not isinstance(features[self.label_column] ,a_ ):
raise ValueError(F"""Column {self.label_column} is not a ClassLabel.""" )
_a : Any = copy.deepcopy(self )
_a : Dict = self.label_schema.copy()
_a : Tuple = features[self.label_column]
_a : Tuple = label_schema
return task_template
@property
def __lowercase ( self : Any ):
'''simple docstring'''
return {
self.audio_column: "audio",
self.label_column: "labels",
}
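    # A hedged usage sketch (assumes the original class name AudioClassification,
    # the original field names audio_column / label_column, and that the
    # validation method above mirrors datasets' TaskTemplate.align_with_features):
    # features = Features({'audio': Audio(), 'labels': ClassLabel(names=['cat', 'dog'])})
    # task = AudioClassification().align_with_features(features)
    # task.label_schema['labels']  # now holds the concrete ClassLabel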
| 358 |
'''simple docstring'''
from functools import lru_cache
@lru_cache
def UpperCAmelCase_ (__a : int ):
"""simple docstring"""
if num < 0:
raise ValueError('Number should not be negative.' )
return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
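# A minimal usage sketch: lru_cache memoizes each recursive call, so repeated
# factorials cost O(1) after the first computation.
# >>> UpperCAmelCase_(5)
# 120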
| 5 | 0 |
'''simple docstring'''
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
__lowerCAmelCase = """\
@inproceedings{lin-2004-rouge,
title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",
author = \"Lin, Chin-Yew\",
booktitle = \"Text Summarization Branches Out\",
month = jul,
year = \"2004\",
address = \"Barcelona, Spain\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W04-1013\",
pages = \"74--81\",
}
"""
__lowerCAmelCase = """\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against one or more human-produced reference summaries or translations.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metric is a wrapper around Google Research's reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
"""
__lowerCAmelCase = """
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,
`\"rougeL\"`: Longest common subsequence based scoring.
`\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric('rouge')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
>>> print(results[\"rouge1\"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results[\"rouge1\"].mid.fmeasure)
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase__ ( datasets.Metric ):
"""simple docstring"""
def __lowercase ( self : List[Any] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Value('string' ,id='sequence' ),
'references': datasets.Value('string' ,id='sequence' ),
} ) ,codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] ,reference_urls=[
'https://en.wikipedia.org/wiki/ROUGE_(metric)',
'https://github.com/google-research/google-research/tree/master/rouge',
] ,)
def __lowercase ( self : Union[str, Any] ,_a : Tuple ,_a : List[Any] ,_a : int=None ,_a : Union[str, Any]=True ,_a : List[Any]=False ):
'''simple docstring'''
if rouge_types is None:
_a : Optional[Any] = ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
_a : List[str] = rouge_scorer.RougeScorer(rouge_types=__lowerCAmelCase ,use_stemmer=__lowerCAmelCase )
if use_aggregator:
_a : Dict = scoring.BootstrapAggregator()
else:
_a : Union[str, Any] = []
for ref, pred in zip(__lowerCAmelCase ,__lowerCAmelCase ):
_a : int = scorer.score(__lowerCAmelCase ,__lowerCAmelCase )
if use_aggregator:
aggregator.add_scores(__lowerCAmelCase )
else:
scores.append(__lowerCAmelCase )
if use_aggregator:
_a : Dict = aggregator.aggregate()
else:
_a : Union[str, Any] = {}
for key in scores[0]:
_a : Optional[Any] = [score[key] for score in scores]
return result
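    # A hedged usage sketch (mirrors the docstring example above, but with
    # use_aggregator=False so per-example Score tuples come back as lists):
    # >>> rouge = datasets.load_metric('rouge')
    # >>> out = rouge.compute(predictions=['hello there'], references=['hello there'],
    # ...                     use_aggregator=False)
    # >>> out['rouge1']  # [Score(precision=1.0, recall=1.0, fmeasure=1.0)]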
| 359 |
'''simple docstring'''
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
__lowerCAmelCase = threading.Lock()
__lowerCAmelCase = None
__lowerCAmelCase = {
"""debug""": logging.DEBUG,
"""info""": logging.INFO,
"""warning""": logging.WARNING,
"""error""": logging.ERROR,
"""critical""": logging.CRITICAL,
}
__lowerCAmelCase = logging.WARNING
__lowerCAmelCase = True
def UpperCAmelCase_ ():
"""simple docstring"""
_a : Dict = os.getenv('TRANSFORMERS_VERBOSITY' , __a )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
f"""Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, """
f"""has to be one of: { ', '.join(log_levels.keys() ) }""" )
return _default_log_level
def UpperCAmelCase_ ():
"""simple docstring"""
return __name__.split('.' )[0]
def UpperCAmelCase_ ():
"""simple docstring"""
return logging.getLogger(_get_library_name() )
def UpperCAmelCase_ ():
"""simple docstring"""
global _default_handler
with _lock:
if _default_handler:
# This library has already configured the library root logger.
return
_a : str = logging.StreamHandler() # Set sys.stderr as stream.
_a : Optional[Any] = sys.stderr.flush
# Apply our default configuration to the library root logger.
_a : List[Any] = _get_library_root_logger()
library_root_logger.addHandler(_default_handler )
library_root_logger.setLevel(_get_default_logging_level() )
_a : List[str] = False
def UpperCAmelCase_ ():
"""simple docstring"""
global _default_handler
with _lock:
if not _default_handler:
return
_a : int = _get_library_root_logger()
library_root_logger.removeHandler(_default_handler )
library_root_logger.setLevel(logging.NOTSET )
_a : str = None
def UpperCAmelCase_ ():
"""simple docstring"""
return log_levels
def UpperCAmelCase_ (__a : Optional[str] = None ):
"""simple docstring"""
if name is None:
_a : List[Any] = _get_library_name()
_configure_library_root_logger()
return logging.getLogger(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
_configure_library_root_logger()
return _get_library_root_logger().getEffectiveLevel()
def UpperCAmelCase_ (__a : int ):
"""simple docstring"""
_configure_library_root_logger()
_get_library_root_logger().setLevel(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
return set_verbosity(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
return set_verbosity(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
return set_verbosity(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
return set_verbosity(__a )
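# A hedged usage sketch (the four wrappers above mirror transformers'
# set_verbosity_info/warning/debug/error; their level constants were lost to
# obfuscation, so pass one explicitly; set_verbosity / get_verbosity are the
# assumed original names of the helpers defined earlier):
# >>> set_verbosity(log_levels['info'])  # route INFO and above to stderr
# >>> get_verbosity() == logging.INFO
# True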
def UpperCAmelCase_ ():
"""simple docstring"""
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().removeHandler(_default_handler )
def UpperCAmelCase_ ():
"""simple docstring"""
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().addHandler(_default_handler )
def UpperCAmelCase_ (__a : logging.Handler ):
"""simple docstring"""
_configure_library_root_logger()
assert handler is not None
_get_library_root_logger().addHandler(__a )
def UpperCAmelCase_ (__a : logging.Handler ):
"""simple docstring"""
_configure_library_root_logger()
assert handler is not None and handler not in _get_library_root_logger().handlers
_get_library_root_logger().removeHandler(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
_configure_library_root_logger()
_a : Union[str, Any] = False
def UpperCAmelCase_ ():
"""simple docstring"""
_configure_library_root_logger()
_a : Dict = True
def UpperCAmelCase_ ():
"""simple docstring"""
_a : Any = _get_library_root_logger().handlers
for handler in handlers:
_a : Union[str, Any] = logging.Formatter('[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s' )
handler.setFormatter(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
_a : Union[str, Any] = _get_library_root_logger().handlers
for handler in handlers:
handler.setFormatter(__a )
def UpperCAmelCase_ (self : Union[str, Any] , *__a : Union[str, Any] , **__a : Union[str, Any] ):
"""simple docstring"""
_a : Union[str, Any] = os.getenv('TRANSFORMERS_NO_ADVISORY_WARNINGS' , __a )
if no_advisory_warnings:
return
self.warning(*__a , **__a )
__lowerCAmelCase = warning_advice
@functools.lru_cache(__a )
def UpperCAmelCase_ (self : int , *__a : Optional[Any] , **__a : Any ):
"""simple docstring"""
self.warning(*__a , **__a )
__lowerCAmelCase = warning_once
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : Any ,*_a : Tuple ,**_a : int ): # pylint: disable=unused-argument
'''simple docstring'''
_a : int = args[0] if args else None
def __iter__( self : str ):
'''simple docstring'''
return iter(self._iterator )
def __getattr__( self : List[Any] ,_a : int ):
'''simple docstring'''
def empty_fn(*_a : Optional[Any] ,**_a : Any ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self : List[str] ):
'''simple docstring'''
return self
def __exit__( self : List[str] ,_a : str ,_a : List[Any] ,_a : str ):
'''simple docstring'''
return
class UpperCAmelCase__ :
"""simple docstring"""
def __call__( self : Union[str, Any] ,*_a : Tuple ,**_a : Tuple ):
'''simple docstring'''
if _tqdm_active:
return tqdm_lib.tqdm(*_a ,**_a )
else:
return EmptyTqdm(*_a ,**_a )
def __lowercase ( self : str ,*_a : List[Any] ,**_a : Any ):
'''simple docstring'''
_a : Any = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*_a ,**_a )
def __lowercase ( self : List[str] ):
'''simple docstring'''
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
__lowerCAmelCase = _tqdm_cls()
def UpperCAmelCase_ ():
"""simple docstring"""
global _tqdm_active
return bool(_tqdm_active )
def UpperCAmelCase_ ():
"""simple docstring"""
global _tqdm_active
_a : str = True
hf_hub_utils.enable_progress_bars()
def UpperCAmelCase_ ():
"""simple docstring"""
global _tqdm_active
_a : Dict = False
hf_hub_utils.disable_progress_bars()
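# A minimal behavioral sketch of the tqdm shim above (assumes the module-level
# instance kept its original name, tqdm): when progress bars are disabled it
# returns an EmptyTqdm whose methods are no-ops, so call sites need no branching.
# for _ in tqdm(range(3)):  # real progress bar if _tqdm_active, silent otherwise
#     pass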
| 5 | 0 |
'''simple docstring'''
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
__lowerCAmelCase = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def UpperCAmelCase_ (__a : Union[str, Any] ):
"""simple docstring"""
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def UpperCAmelCase_ (__a : Optional[Any] , __a : Optional[int] , __a : List[Any] ):
"""simple docstring"""
return max(metric_fn(a__ , a__ ) for gt in ground_truths )
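# e.g. with metric_fn as a SQuAD-style exact_match_score (which normalizes
# case), prediction='Paris' and ground_truths=['paris', 'PARIS'] yields 1.0.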
def UpperCAmelCase_ (__a : List[str] , __a : str , __a : Optional[Any] ):
"""simple docstring"""
_a : Tuple = [line.strip() for line in open(a__ , 'r' ).readlines()]
_a : str = []
if args.gold_data_mode == "qa":
_a : int = pd.read_csv(a__ , sep='\t' , header=a__ )
for answer_list in data[1]:
_a : Optional[Any] = ast.literal_eval(a__ )
answers.append(a__ )
else:
_a : Tuple = [line.strip() for line in open(a__ , 'r' ).readlines()]
_a : Optional[Any] = [[reference] for reference in references]
_a : Optional[Any] = 0
for prediction, ground_truths in zip(a__ , a__ ):
total += 1
em += metric_max_over_ground_truths(a__ , a__ , a__ )
fa += metric_max_over_ground_truths(a__ , a__ , a__ )
_a : Tuple = 100.0 * em / total
_a : Optional[Any] = 100.0 * fa / total
logger.info(f"""F1: {fa:.2f}""" )
logger.info(f"""EM: {em:.2f}""" )
def UpperCAmelCase_ (__a : Dict , __a : List[Any] , __a : Optional[Any] ):
"""simple docstring"""
_a : Any = args.k
_a : int = [line.strip() for line in open(a__ , 'r' ).readlines()]
_a : Any = [line.strip() for line in open(a__ , 'r' ).readlines()]
_a : Dict = 0
for hypo, reference in zip(a__ , a__ ):
_a : Optional[int] = set(hypo.split('\t' )[:k] )
_a : List[str] = set(reference.split('\t' ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
_a : Union[str, Any] = 100.0 * em / total
logger.info(f"""Precision@{k}: {em: .2f}""" )
def UpperCAmelCase_ (__a : str , __a : Dict , __a : List[Any] ):
"""simple docstring"""
    def strip_title(title : str ):
if title.startswith('"' ):
_a : List[str] = title[1:]
if title.endswith('"' ):
_a : Optional[int] = title[:-1]
return title
_a : int = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
a__ , return_tensors='pt' , padding=a__ , truncation=a__ , )['input_ids'].to(args.device )
_a : List[str] = rag_model.rag.question_encoder(a__ )
_a : Union[str, Any] = question_enc_outputs[0]
_a : Any = rag_model.retriever(
a__ , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='pt' , )
_a : Tuple = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
_a : Tuple = []
for docs in all_docs:
_a : List[Any] = [strip_title(a__ ) for title in docs['title']]
provenance_strings.append('\t'.join(a__ ) )
return provenance_strings
def UpperCAmelCase_ (__a : Any , __a : Any , __a : List[Any] ):
"""simple docstring"""
with torch.no_grad():
_a : Any = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
a__ , return_tensors='pt' , padding=a__ , truncation=a__ )
_a : Tuple = inputs_dict.input_ids.to(args.device )
_a : Any = inputs_dict.attention_mask.to(args.device )
_a : List[str] = rag_model.generate( # rag_model overwrites generate
a__ , attention_mask=a__ , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=a__ , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
_a : Optional[Any] = rag_model.retriever.generator_tokenizer.batch_decode(a__ , skip_special_tokens=a__ )
if args.print_predictions:
for q, a in zip(a__ , a__ ):
logger.info('Q: {} - A: {}'.format(a__ , a__ ) )
return answers
def UpperCAmelCase_ ():
"""simple docstring"""
_a : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'--model_type' , choices=['rag_sequence', 'rag_token', 'bart'] , type=a__ , help=(
'RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'
' model_name_or_path'
) , )
parser.add_argument(
'--index_name' , default=a__ , choices=['exact', 'compressed', 'legacy'] , type=a__ , help='RAG model retriever type' , )
parser.add_argument(
'--index_path' , default=a__ , type=a__ , help='Path to the retrieval index' , )
parser.add_argument('--n_docs' , default=5 , type=a__ , help='Number of retrieved docs' )
parser.add_argument(
'--model_name_or_path' , default=a__ , type=a__ , required=a__ , help='Path to pretrained checkpoints or model identifier from huggingface.co/models' , )
parser.add_argument(
'--eval_mode' , choices=['e2e', 'retrieval'] , default='e2e' , type=a__ , help=(
'Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'
' precision@k.'
) , )
parser.add_argument('--k' , default=1 , type=a__ , help='k for the precision@k calculation' )
parser.add_argument(
'--evaluation_set' , default=a__ , type=a__ , required=a__ , help='Path to a file containing evaluation samples' , )
parser.add_argument(
'--gold_data_path' , default=a__ , type=a__ , required=a__ , help='Path to a tab-separated file with gold samples' , )
parser.add_argument(
'--gold_data_mode' , default='qa' , type=a__ , choices=['qa', 'ans'] , help=(
'Format of the gold data file'
'qa - a single line in the following format: question [tab] answer_list'
'ans - a single line of the gold file contains the expected answer string'
) , )
parser.add_argument(
'--predictions_path' , type=a__ , default='predictions.txt' , help='Name of the predictions file, to be stored in the checkpoints directory' , )
parser.add_argument(
'--eval_all_checkpoints' , action='store_true' , help='Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number' , )
parser.add_argument(
'--eval_batch_size' , default=8 , type=a__ , help='Batch size per GPU/CPU for evaluation.' , )
parser.add_argument(
'--recalculate' , help='Recalculate predictions even if the prediction file exists' , action='store_true' , )
parser.add_argument(
'--num_beams' , default=4 , type=a__ , help='Number of beams to be used when generating answers' , )
parser.add_argument('--min_length' , default=1 , type=a__ , help='Min length of the generated answers' )
parser.add_argument('--max_length' , default=5_0 , type=a__ , help='Max length of the generated answers' )
parser.add_argument(
'--print_predictions' , action='store_true' , help='If True, prints predictions while evaluating.' , )
parser.add_argument(
        '--print_docs' , action='store_true' , help='If True, prints docs retrieved while generating.' , )
_a : List[str] = parser.parse_args()
_a : Optional[Any] = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
return args
def UpperCAmelCase_ (__a : Dict ):
"""simple docstring"""
_a : List[str] = {}
if args.model_type is None:
_a : Dict = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith('rag' ):
_a : List[str] = RagTokenForGeneration if args.model_type == 'rag_token' else RagSequenceForGeneration
_a : Tuple = args.n_docs
if args.index_name is not None:
_a : List[str] = args.index_name
if args.index_path is not None:
_a : Tuple = args.index_path
else:
_a : Optional[int] = BartForConditionalGeneration
_a : Tuple = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info('Evaluate the following checkpoints: %s' , a__ )
_a : List[Any] = get_scores if args.eval_mode == 'e2e' else get_precision_at_k
_a : str = evaluate_batch_eae if args.eval_mode == 'e2e' else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info('Calculating metrics based on an existing predictions file: {}'.format(args.predictions_path ) )
score_fn(a__ , args.predictions_path , args.gold_data_path )
continue
logger.info('***** Running evaluation for {} *****'.format(a__ ) )
logger.info(' Batch size = %d' , args.eval_batch_size )
logger.info(' Predictions will be stored under {}'.format(args.predictions_path ) )
if args.model_type.startswith('rag' ):
_a : Optional[Any] = RagRetriever.from_pretrained(a__ , **a__ )
_a : int = model_class.from_pretrained(a__ , retriever=a__ , **a__ )
model.retriever.init_retrieval()
else:
_a : Optional[int] = model_class.from_pretrained(a__ , **a__ )
model.to(args.device )
with open(args.evaluation_set , 'r' ) as eval_file, open(args.predictions_path , 'w' ) as preds_file:
_a : Any = []
for line in tqdm(a__ ):
questions.append(line.strip() )
if len(a__ ) == args.eval_batch_size:
_a : Dict = evaluate_batch_fn(a__ , a__ , a__ )
preds_file.write('\n'.join(a__ ) + '\n' )
preds_file.flush()
_a : Optional[Any] = []
if len(a__ ) > 0:
_a : int = evaluate_batch_fn(a__ , a__ , a__ )
preds_file.write('\n'.join(a__ ) )
preds_file.flush()
score_fn(a__ , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
__lowerCAmelCase = get_args()
main(args)
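# A hypothetical invocation (script name, model id and paths are placeholders;
# every flag is defined in get_args() above):
# python eval_rag.py \
#   --model_name_or_path facebook/rag-sequence-nq \
#   --model_type rag_sequence \
#   --eval_mode e2e \
#   --evaluation_set path/to/test.source \
#   --gold_data_path path/to/gold.tsv \
#   --gold_data_mode qa \
#   --predictions_path predictions.txt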
| 360 |
'''simple docstring'''
def UpperCAmelCase_ (__a : list[int] , __a : list[int] ):
"""simple docstring"""
if not len(__a ) == len(__a ) == 3:
raise ValueError('Please enter a valid equation.' )
if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0:
raise ValueError('Both a & b of two equations can\'t be zero.' )
# Extract the coefficients
_a, _a, _a : Tuple = equationa
_a, _a, _a : str = equationa
# Calculate the determinants of the matrices
_a : Union[str, Any] = aa * ba - aa * ba
_a : List[Any] = ca * ba - ca * ba
_a : List[Any] = aa * ca - aa * ca
# Check if the system of linear equations has a solution (using Cramer's rule)
if determinant == 0:
if determinant_x == determinant_y == 0:
raise ValueError('Infinite solutions. (Consistent system)' )
else:
raise ValueError('No solution. (Inconsistent system)' )
else:
if determinant_x == determinant_y == 0:
            # Trivial solution (consistent homogeneous system: x = y = 0)
return (0.0, 0.0)
else:
_a : int = determinant_x / determinant
_a : List[str] = determinant_y / determinant
# Non-Trivial Solution (Consistent system)
return (x, y)
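# A hedged worked example (equations given as [a, b, c] for a*x + b*y = c, as
# the coefficient extraction above suggests):
# [1, 2, 5] and [3, -1, 1]  ->  determinant = -7, determinant_x = -7,
# determinant_y = -14, so the solution is (x, y) = (1.0, 2.0).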
| 5 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__lowerCAmelCase = logging.get_logger(__name__)
class UpperCAmelCase__ ( __snake_case ):
"""simple docstring"""
__UpperCAmelCase : Tuple = ['pixel_values']
def __init__( self : Optional[int] ,_a : List[str] = True ,_a : Any = None ,_a : List[str] = PIL.Image.BICUBIC ,_a : str = True ,_a : Any = None ,_a : Optional[int] = 1 / 255 ,_a : int = True ,_a : List[str] = True ,_a : List[Any] = None ,_a : Any = None ,**_a : Optional[int] ,):
'''simple docstring'''
super().__init__(**_a )
_a : Dict = size if size is not None else {'height': 256, 'width': 256}
_a : List[Any] = get_size_dict(_a )
_a : Tuple = crop_size if crop_size is not None else {'height': 224, 'width': 224}
_a : Optional[int] = get_size_dict(_a ,param_name='crop_size' )
_a : List[str] = do_resize
_a : Any = size
_a : Union[str, Any] = resample
_a : Tuple = do_center_crop
_a : Dict = crop_size
_a : Tuple = do_rescale
_a : int = rescale_factor
_a : Optional[Any] = do_normalize
_a : List[str] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_a : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __lowercase ( self : List[Any] ,_a : str ,_a : Any ,_a : Dict = PIL.Image.BICUBIC ,_a : str = None ,**_a : Dict ,):
'''simple docstring'''
_a : Dict = get_size_dict(_a )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}""" )
return resize(
_a ,size=(size['height'], size['width']) ,resample=_a ,data_format=_a ,**_a )
def __lowercase ( self : Tuple ,_a : List[Any] ,_a : Dict ,_a : List[Any] = None ,**_a : List[str] ,):
'''simple docstring'''
_a : List[Any] = get_size_dict(_a )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}""" )
return center_crop(_a ,size=(size['height'], size['width']) ,data_format=_a ,**_a )
def __lowercase ( self : Union[str, Any] ,_a : List[Any] ,_a : Any ,_a : List[Any] = None ,**_a : List[str] ,):
'''simple docstring'''
return rescale(_a ,scale=_a ,data_format=_a ,**_a )
def __lowercase ( self : Any ,_a : Optional[Any] ,_a : Union[str, Any] ,_a : Any ,_a : Optional[int] = None ,**_a : Optional[Any] ,):
'''simple docstring'''
return normalize(_a ,mean=_a ,std=_a ,data_format=_a ,**_a )
def __lowercase ( self : str ,_a : Optional[Any] ,_a : str = None ,_a : Union[str, Any] = None ,_a : Tuple=None ,_a : Optional[Any] = None ,_a : List[Any] = None ,_a : str = None ,_a : List[Any] = None ,_a : Dict = None ,_a : Tuple = None ,_a : Tuple = None ,_a : List[Any] = None ,_a : Optional[int] = ChannelDimension.FIRST ,**_a : Tuple ,):
'''simple docstring'''
_a : Any = do_resize if do_resize is not None else self.do_resize
_a : Optional[int] = resample if resample is not None else self.resample
_a : Any = do_center_crop if do_center_crop is not None else self.do_center_crop
_a : str = do_rescale if do_rescale is not None else self.do_rescale
_a : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
_a : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
_a : int = image_mean if image_mean is not None else self.image_mean
_a : Tuple = image_std if image_std is not None else self.image_std
_a : int = size if size is not None else self.size
_a : Optional[Any] = get_size_dict(_a )
_a : Tuple = crop_size if crop_size is not None else self.crop_size
_a : Optional[Any] = get_size_dict(_a ,param_name='crop_size' )
_a : List[Any] = make_list_of_images(_a )
if not valid_images(_a ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
_a : Optional[int] = [to_numpy_array(_a ) for image in images]
if do_resize:
_a : Optional[Any] = [self.resize(image=_a ,size=_a ,resample=_a ) for image in images]
if do_center_crop:
_a : Optional[Any] = [self.center_crop(image=_a ,size=_a ) for image in images]
if do_rescale:
_a : Dict = [self.rescale(image=_a ,scale=_a ) for image in images]
if do_normalize:
_a : str = [self.normalize(image=_a ,mean=_a ,std=_a ) for image in images]
_a : List[str] = [to_channel_dimension_format(_a ,_a ) for image in images]
_a : Optional[Any] = {'pixel_values': images}
return BatchFeature(data=_a ,tensor_type=_a )
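    # Processing-order sketch for the defaults above: a 300x300 RGB input is
    # resized to 256x256 (BICUBIC), center-cropped to 224x224, rescaled by
    # 1/255, then normalized with the IMAGENET_STANDARD mean/std, yielding a
    # pixel_values array of shape (batch, 3, 224, 224).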
| 361 |
'''simple docstring'''
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : int ,_a : List[str] ,_a : Optional[Any]=13 ,_a : str=30 ,_a : str=2 ,_a : Union[str, Any]=3 ,_a : Optional[Any]=True ,_a : int=True ,_a : Union[str, Any]=32 ,_a : List[Any]=5 ,_a : Union[str, Any]=4 ,_a : int=37 ,_a : Any="gelu" ,_a : Union[str, Any]=0.1 ,_a : str=0.1 ,_a : List[str]=10 ,_a : Dict=0.02 ,_a : Tuple=None ,):
'''simple docstring'''
_a : Any = parent
_a : int = batch_size
_a : List[Any] = image_size
_a : Optional[int] = patch_size
_a : List[str] = num_channels
_a : Dict = is_training
_a : Dict = use_labels
_a : Optional[Any] = hidden_size
_a : str = num_hidden_layers
_a : Optional[int] = num_attention_heads
_a : Dict = intermediate_size
_a : Union[str, Any] = hidden_act
_a : List[str] = hidden_dropout_prob
_a : Any = attention_probs_dropout_prob
_a : List[str] = type_sequence_label_size
_a : int = initializer_range
_a : List[Any] = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_a : Union[str, Any] = (image_size // patch_size) ** 2
_a : Tuple = num_patches + 1
def __lowercase ( self : Any ):
'''simple docstring'''
_a : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : str = None
if self.use_labels:
_a : Tuple = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_a : List[str] = self.get_config()
return config, pixel_values, labels
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
return ViTMSNConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,initializer_range=self.initializer_range ,)
def __lowercase ( self : Tuple ,_a : Any ,_a : List[Any] ,_a : int ):
'''simple docstring'''
_a : str = ViTMSNModel(config=_a )
model.to(_a )
model.eval()
_a : int = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def __lowercase ( self : List[Any] ,_a : str ,_a : Tuple ,_a : Dict ):
'''simple docstring'''
_a : Tuple = self.type_sequence_label_size
_a : int = ViTMSNForImageClassification(_a )
model.to(_a )
model.eval()
_a : Dict = model(_a ,labels=_a )
print('Pixel and labels shape: {pixel_values.shape}, {labels.shape}' )
print('Labels: {labels}' )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_a : int = 1
_a : Optional[Any] = ViTMSNForImageClassification(_a )
model.to(_a )
model.eval()
_a : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_a : Optional[int] = model(_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def __lowercase ( self : Any ):
'''simple docstring'''
_a : Optional[int] = self.prepare_config_and_inputs()
_a, _a, _a : int = config_and_inputs
_a : List[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Tuple = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
__UpperCAmelCase : List[Any] = (
{'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification}
if is_torch_available()
else {}
)
__UpperCAmelCase : str = False
__UpperCAmelCase : Optional[Any] = False
__UpperCAmelCase : List[str] = False
__UpperCAmelCase : int = False
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : List[str] = ViTMSNModelTester(self )
_a : Optional[int] = ConfigTester(self ,config_class=_a ,has_text_modality=_a ,hidden_size=37 )
def __lowercase ( self : str ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMSN does not use inputs_embeds' )
def __lowercase ( self : List[str] ):
'''simple docstring'''
pass
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a, _a : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : List[Any] = model_class(_a )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
_a : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a ,nn.Linear ) )
def __lowercase ( self : Any ):
'''simple docstring'''
_a, _a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : List[str] = model_class(_a )
_a : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : List[Any] = [*signature.parameters.keys()]
_a : int = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_a )
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def __lowercase ( self : int ):
'''simple docstring'''
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : Dict = ViTMSNModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def UpperCAmelCase_ ():
"""simple docstring"""
_a : List[str] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained('facebook/vit-msn-small' ) if is_vision_available() else None
@slow
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(2 )
_a : List[str] = ViTMSNForImageClassification.from_pretrained('facebook/vit-msn-small' ).to(_a )
_a : List[str] = self.default_image_processor
_a : int = prepare_img()
_a : Tuple = image_processor(images=_a ,return_tensors='pt' ).to(_a )
# forward pass
with torch.no_grad():
_a : Optional[int] = model(**_a )
# verify the logits
_a : Union[str, Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,_a )
_a : List[Any] = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_a ,atol=1E-4 ) )
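# To exercise the @slow integration check above (RUN_SLOW gating follows
# transformers' testing conventions; the test file path is hypothetical):
# RUN_SLOW=1 python -m pytest tests/models/vit_msn/test_modeling_vit_msn.py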
| 5 | 0 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : Dict = XLMRobertaModel.from_pretrained('xlm-roberta-base' )
_a : int = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]] )
# The dog is cute and lives in the garden house
_a : int = torch.Size((1, 12, 768) ) # batch_size, sequence_length, embedding_vector_dim
_a : List[str] = torch.tensor(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
_a : Any = model(_a )['last_hidden_state'].detach()
self.assertEqual(output.shape ,_a )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] ,_a ,atol=1E-3 ) )
@slow
def __lowercase ( self : Dict ):
'''simple docstring'''
_a : Dict = XLMRobertaModel.from_pretrained('xlm-roberta-large' )
_a : Any = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]] )
# The dog is cute and lives in the garden house
_a : Dict = torch.Size((1, 12, 1024) ) # batch_size, sequence_length, embedding_vector_dim
_a : int = torch.tensor(
[[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
_a : List[Any] = model(_a )['last_hidden_state'].detach()
self.assertEqual(output.shape ,_a )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] ,_a ,atol=1E-3 ) )
| 362 |
'''simple docstring'''
import requests
from bsa import BeautifulSoup
def UpperCAmelCase_ (__a : str = "https://www.worldometers.info/coronavirus" ):
"""simple docstring"""
_a : List[str] = BeautifulSoup(requests.get(__a ).text , 'html.parser' )
_a : Dict = soup.findAll('h1' )
_a : Union[str, Any] = soup.findAll('div' , {'class': 'maincounter-number'} )
keys += soup.findAll('span' , {'class': 'panel-title'} )
values += soup.findAll('div' , {'class': 'number-table-main'} )
return {key.text.strip(): value.text.strip() for key, value in zip(__a , __a )}
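# e.g. the zip above pairs each <h1>/<span> caption with the matching counter
# div, yielding entries like {'Coronavirus Cases:': '<live count>'} (values
# change with the live page).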
if __name__ == "__main__":
print("""\033[1m""" + """COVID-19 Status of the World""" + """\033[0m\n""")
for key, value in world_covidaa_stats().items():
print(f'''{key}\n{value}\n''')
| 5 | 0 |
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
__lowerCAmelCase = get_tests_dir("""fixtures""")
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : Optional[Any] = mock.Mock()
_a : Any = 500
_a : Tuple = {}
_a : int = HTTPError
_a : Tuple = {}
# Download this model to make sure it's in the cache.
_a : str = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request' ,return_value=_SCREAMING_SNAKE_CASE ) as mock_head:
_a : Any = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit' )
        # This checks that we did call the fake head request
mock_head.assert_called()
def __lowercase ( self : int ):
'''simple docstring'''
_a : List[Any] = ViTImageProcessor.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json' )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
# config is in subfolder, the following should not work without specifying the subfolder
_a : Optional[int] = AutoImageProcessor.from_pretrained('hf-internal-testing/stable-diffusion-all-variants' )
_a : int = AutoImageProcessor.from_pretrained(
'hf-internal-testing/stable-diffusion-all-variants' ,subfolder='feature_extractor' )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
@is_staging_test
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def __lowercase ( cls : int ):
'''simple docstring'''
_a : str = TOKEN
HfFolder.save_token(_SCREAMING_SNAKE_CASE )
@classmethod
def __lowercase ( cls : Dict ):
'''simple docstring'''
try:
delete_repo(token=cls._token ,repo_id='test-image-processor' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='valid_org/test-image-processor-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='test-dynamic-image-processor' )
except HTTPError:
pass
def __lowercase ( self : int ):
'''simple docstring'''
_a : Dict = ViTImageProcessor.from_pretrained(_SCREAMING_SNAKE_CASE )
image_processor.push_to_hub('test-image-processor' ,use_auth_token=self._token )
_a : str = ViTImageProcessor.from_pretrained(F"""{USER}/test-image-processor""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(_SCREAMING_SNAKE_CASE ,getattr(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) )
# Reset repo
delete_repo(token=self._token ,repo_id='test-image-processor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
_SCREAMING_SNAKE_CASE ,repo_id='test-image-processor' ,push_to_hub=_SCREAMING_SNAKE_CASE ,use_auth_token=self._token )
_a : List[str] = ViTImageProcessor.from_pretrained(F"""{USER}/test-image-processor""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(_SCREAMING_SNAKE_CASE ,getattr(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) )
def __lowercase ( self : str ):
'''simple docstring'''
_a : int = ViTImageProcessor.from_pretrained(_SCREAMING_SNAKE_CASE )
image_processor.push_to_hub('valid_org/test-image-processor' ,use_auth_token=self._token )
_a : Optional[Any] = ViTImageProcessor.from_pretrained('valid_org/test-image-processor' )
for k, v in image_processor.__dict__.items():
self.assertEqual(_SCREAMING_SNAKE_CASE ,getattr(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) )
# Reset repo
delete_repo(token=self._token ,repo_id='valid_org/test-image-processor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
_SCREAMING_SNAKE_CASE ,repo_id='valid_org/test-image-processor-org' ,push_to_hub=_SCREAMING_SNAKE_CASE ,use_auth_token=self._token )
_a : Dict = ViTImageProcessor.from_pretrained('valid_org/test-image-processor-org' )
for k, v in image_processor.__dict__.items():
self.assertEqual(_SCREAMING_SNAKE_CASE ,getattr(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
CustomImageProcessor.register_for_auto_class()
_a : List[Any] = CustomImageProcessor.from_pretrained(_SCREAMING_SNAKE_CASE )
image_processor.push_to_hub('test-dynamic-image-processor' ,use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map ,{'AutoImageProcessor': 'custom_image_processing.CustomImageProcessor'} ,)
_a : List[str] = AutoImageProcessor.from_pretrained(
F"""{USER}/test-dynamic-image-processor""" ,trust_remote_code=_SCREAMING_SNAKE_CASE )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ ,'CustomImageProcessor' )
| 363 |
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
__lowerCAmelCase = """docs/source/en/_toctree.yml"""
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
_a : Any = defaultdict(__a )
for doc in model_doc:
counts[doc["local"]] += 1
_a : List[str] = [key for key, value in counts.items() if value > 1]
_a : str = []
for duplicate_key in duplicates:
_a : Union[str, Any] = list({doc['title'] for doc in model_doc if doc['local'] == duplicate_key} )
if len(__a ) > 1:
raise ValueError(
f"""{duplicate_key} is present several times in the documentation table of content at """
'`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
'others.' )
# Only add this once
new_doc.append({'local': duplicate_key, 'title': titles[0]} )
    # Add non-duplicate keys
new_doc.extend([doc for doc in model_doc if counts[doc['local']] == 1] )
# Sort
    return sorted(__a , key=lambda s : s["title"].lower() )
def UpperCAmelCase_ (__a : Optional[int]=False ):
"""simple docstring"""
with open(__a , encoding='utf-8' ) as f:
_a : Tuple = yaml.safe_load(f.read() )
# Get to the API doc
_a : Union[str, Any] = 0
while content[api_idx]["title"] != "API":
api_idx += 1
_a : Union[str, Any] = content[api_idx]['sections']
# Then to the model doc
_a : List[str] = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
_a : List[str] = api_doc[model_idx]['sections']
_a : List[Any] = [(idx, section) for idx, section in enumerate(__a ) if 'sections' in section]
_a : Tuple = False
for idx, modality_doc in modalities_docs:
_a : List[Any] = modality_doc['sections']
_a : Any = clean_model_doc_toc(__a )
if old_modality_doc != new_modality_doc:
_a : Union[str, Any] = True
if overwrite:
_a : str = new_modality_doc
if diff:
if overwrite:
_a : Dict = model_doc
_a : Dict = api_doc
with open(__a , 'w' , encoding='utf-8' ) as f:
f.write(yaml.dump(__a , allow_unicode=__a ) )
else:
raise ValueError(
'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
__lowerCAmelCase = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
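# Typical invocations (the script path is hypothetical):
# python utils/check_doc_toc.py                      # fail if the model toc is unsorted
# python utils/check_doc_toc.py --fix_and_overwrite  # rewrite the toc in place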
| 5 | 0 |
'''simple docstring'''
def UpperCAmelCase_ (__a : str = 1_0_0_0 ):
"""simple docstring"""
    _a, _a : Optional[Any] = 1, 1
_a : Any = []
for i in range(1 , n + 1 ):
_a : Union[str, Any] = prev_numerator + 2 * prev_denominator
_a : List[Any] = prev_numerator + prev_denominator
if len(str(_UpperCAmelCase ) ) > len(str(_UpperCAmelCase ) ):
result.append(_UpperCAmelCase )
_a : Union[str, Any] = numerator
_a : Union[str, Any] = denominator
return len(_UpperCAmelCase )
if __name__ == "__main__":
print(f'''{solution() = }''')
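# With the default n = 1000 expansions this counts the fractions in the
# continued-fraction expansion of sqrt(2) whose numerator has more digits than
# the denominator (Project Euler problem 57; the published answer is 153).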
| 364 |
'''simple docstring'''
from collections.abc import Generator
from math import sin
def UpperCAmelCase_ (__a : bytes ):
"""simple docstring"""
if len(__a ) != 3_2:
raise ValueError('Input must be of length 32' )
_a : Any = b''
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def UpperCAmelCase_ (__a : int ):
"""simple docstring"""
if i < 0:
raise ValueError('Input must be non-negative' )
_a : List[str] = format(__a , '08x' )[-8:]
_a : str = b''
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('utf-8' )
return little_endian_hex
def UpperCAmelCase_ (__a : bytes ):
"""simple docstring"""
_a : List[Any] = b''
for char in message:
bit_string += format(__a , '08b' ).encode('utf-8' )
_a : int = format(len(__a ) , '064b' ).encode('utf-8' )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(__a ) % 5_1_2 != 4_4_8:
bit_string += b"0"
bit_string += to_little_endian(start_len[3_2:] ) + to_little_endian(start_len[:3_2] )
return bit_string
def UpperCAmelCase_ (__a : bytes ):
"""simple docstring"""
if len(__a ) % 5_1_2 != 0:
raise ValueError('Input must have length that\'s a multiple of 512' )
for pos in range(0 , len(__a ) , 5_1_2 ):
_a : List[Any] = bit_string[pos : pos + 5_1_2]
_a : str = []
for i in range(0 , 5_1_2 , 3_2 ):
block_words.append(int(to_little_endian(block[i : i + 3_2] ) , 2 ) )
yield block_words
def UpperCAmelCase_ (__a : int ):
"""simple docstring"""
if i < 0:
raise ValueError('Input must be non-negative' )
_a : List[str] = format(__a , '032b' )
_a : int = ''
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(__a , 2 )
def UpperCAmelCase_ (__a : int , __a : int ):
"""simple docstring"""
return (a + b) % 2**3_2
def UpperCAmelCase_ (__a : int , __a : int ):
"""simple docstring"""
if i < 0:
raise ValueError('Input must be non-negative' )
if shift < 0:
raise ValueError('Shift must be non-negative' )
return ((i << shift) ^ (i >> (3_2 - shift))) % 2**3_2
def UpperCAmelCase_ (__a : bytes ):
"""simple docstring"""
_a : str = preprocess(__a )
_a : Optional[int] = [int(2**3_2 * abs(sin(i + 1 ) ) ) for i in range(6_4 )]
# Starting states
_a : int = 0x67_45_23_01
_a : Union[str, Any] = 0xEF_CD_AB_89
_a : str = 0x98_BA_DC_FE
_a : List[Any] = 0x10_32_54_76
_a : Optional[int] = [
7,
1_2,
1_7,
2_2,
7,
1_2,
1_7,
2_2,
7,
1_2,
1_7,
2_2,
7,
1_2,
1_7,
2_2,
5,
9,
1_4,
2_0,
5,
9,
1_4,
2_0,
5,
9,
1_4,
2_0,
5,
9,
1_4,
2_0,
4,
1_1,
1_6,
2_3,
4,
1_1,
1_6,
2_3,
4,
1_1,
1_6,
2_3,
4,
1_1,
1_6,
2_3,
6,
1_0,
1_5,
2_1,
6,
1_0,
1_5,
2_1,
6,
1_0,
1_5,
2_1,
6,
1_0,
1_5,
2_1,
]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(__a ):
_a : Union[str, Any] = aa
_a : List[Any] = ba
_a : List[Any] = ca
_a : Dict = da
# Hash current chunk
for i in range(6_4 ):
if i <= 1_5:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
_a : Optional[int] = d ^ (b & (c ^ d))
_a : Optional[Any] = i
elif i <= 3_1:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
_a : Optional[Any] = c ^ (d & (b ^ c))
_a : Dict = (5 * i + 1) % 1_6
elif i <= 4_7:
_a : Optional[Any] = b ^ c ^ d
_a : Dict = (3 * i + 5) % 1_6
else:
_a : int = c ^ (b | not_aa(__a ))
_a : List[str] = (7 * i) % 1_6
_a : Optional[int] = (f + a + added_consts[i] + block_words[g]) % 2**3_2
_a : Union[str, Any] = d
_a : Tuple = c
_a : Optional[int] = b
_a : Union[str, Any] = sum_aa(__a , left_rotate_aa(__a , shift_amounts[i] ) )
# Add hashed chunk to running total
_a : Any = sum_aa(__a , __a )
_a : Dict = sum_aa(__a , __a )
_a : Union[str, Any] = sum_aa(__a , __a )
_a : str = sum_aa(__a , __a )
_a : Optional[Any] = reformat_hex(__a ) + reformat_hex(__a ) + reformat_hex(__a ) + reformat_hex(__a )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
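# A minimal usage sketch (the final function above returns the digest as ASCII
# hex bytes; the helper names were collapsed by obfuscation, so this assumes
# each definition kept a distinct original name):
# >>> md5_me(b'hello world')   # 'md5_me' is the assumed original name
# b'5eb63bbbe01eeed093cb22bb8f5acdc3'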
| 5 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__lowerCAmelCase = logging.get_logger(__name__)
class UpperCAmelCase__ ( lowerCamelCase_ ):
"""simple docstring"""
__UpperCAmelCase : int = ["""pixel_values"""]
def __init__( self : Any ,_a : int = True ,_a : Dict = 32 ,_a : Any=PILImageResampling.BILINEAR ,_a : Optional[int] = True ,**_a : Union[str, Any] ,):
'''simple docstring'''
_a : Optional[Any] = do_resize
_a : Optional[Any] = do_rescale
_a : List[Any] = size_divisor
_a : Tuple = resample
super().__init__(**lowerCAmelCase__ )
def __lowercase ( self : List[str] ,_a : str ,_a : int ,_a : Any ,_a : int = None ,**_a : List[Any] ):
'''simple docstring'''
_a, _a : Optional[Any] = get_image_size(lowerCAmelCase__ )
# Rounds the height and width down to the closest multiple of size_divisor
_a : List[str] = height // size_divisor * size_divisor
_a : List[Any] = width // size_divisor * size_divisor
_a : List[str] = resize(lowerCAmelCase__ ,(new_h, new_w) ,resample=lowerCAmelCase__ ,data_format=lowerCAmelCase__ ,**lowerCAmelCase__ )
return image
def __lowercase ( self : Any ,_a : Optional[int] ,_a : List[Any] ,_a : Optional[int] = None ,**_a : Union[str, Any] ):
'''simple docstring'''
return rescale(image=lowerCAmelCase__ ,scale=lowerCAmelCase__ ,data_format=lowerCAmelCase__ ,**lowerCAmelCase__ )
def __lowercase ( self : Union[str, Any] ,_a : List[Any] ,_a : int = None ,_a : Optional[Any] = None ,_a : Optional[int]=None ,_a : str = None ,_a : Optional[Any] = None ,_a : Optional[Any] = ChannelDimension.FIRST ,**_a : Optional[int] ,):
'''simple docstring'''
_a : Tuple = do_resize if do_resize is not None else self.do_resize
_a : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
_a : Optional[Any] = size_divisor if size_divisor is not None else self.size_divisor
_a : List[Any] = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('size_divisor is required for resizing' )
_a : Tuple = make_list_of_images(lowerCAmelCase__ )
if not valid_images(lowerCAmelCase__ ):
raise ValueError('Invalid image(s)' )
# All transformations expect numpy arrays.
_a : Tuple = [to_numpy_array(lowerCAmelCase__ ) for img in images]
if do_resize:
_a : Tuple = [self.resize(lowerCAmelCase__ ,size_divisor=lowerCAmelCase__ ,resample=lowerCAmelCase__ ) for image in images]
if do_rescale:
_a : Optional[int] = [self.rescale(lowerCAmelCase__ ,scale=1 / 255 ) for image in images]
_a : Optional[int] = [to_channel_dimension_format(lowerCAmelCase__ ,lowerCAmelCase__ ) for image in images]
_a : Optional[int] = {'pixel_values': images}
return BatchFeature(data=lowerCAmelCase__ ,tensor_type=lowerCAmelCase__ )
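    # Rounding sketch for the resize above: with size_divisor=32, an input of
    # height 250 and width 333 is resized to 224 x 320 (each dimension rounded
    # down to the nearest multiple of 32).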
| 365 |
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def UpperCAmelCase_ (__a : str , __a : Dict=0.999 , __a : List[str]="cosine" , ):
"""simple docstring"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(__a : Union[str, Any] ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__a : int ):
return math.exp(t * -12.0 )
else:
raise ValueError(f"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
_a : Tuple = []
for i in range(__a ):
_a : Union[str, Any] = i / num_diffusion_timesteps
_a : Any = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__a ) / alpha_bar_fn(__a ) , __a ) )
return torch.tensor(__a , dtype=torch.floataa )
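# Schedule sketch: each beta_i = min(1 - alpha_bar((i+1)/T) / alpha_bar(i/T), max_beta),
# so for the cosine transform the betas rise monotonically toward the 0.999 clip.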
class UpperCAmelCase__ ( lowercase__ , lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = [e.name for e in KarrasDiffusionSchedulers]
__UpperCAmelCase : Dict = 2
@register_to_config
def __init__( self : str ,_a : int = 1000 ,_a : float = 0.0_0085 ,_a : float = 0.012 ,_a : str = "linear" ,_a : Optional[Union[np.ndarray, List[float]]] = None ,_a : str = "epsilon" ,_a : Optional[bool] = False ,_a : Optional[bool] = False ,_a : float = 1.0 ,_a : str = "linspace" ,_a : int = 0 ,):
'''simple docstring'''
if trained_betas is not None:
_a : List[str] = torch.tensor(_a ,dtype=torch.floataa )
elif beta_schedule == "linear":
_a : Tuple = torch.linspace(_a ,_a ,_a ,dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_a : List[str] = (
torch.linspace(beta_start**0.5 ,beta_end**0.5 ,_a ,dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_a : Dict = betas_for_alpha_bar(_a ,alpha_transform_type='cosine' )
elif beta_schedule == "exp":
_a : Tuple = betas_for_alpha_bar(_a ,alpha_transform_type='exp' )
else:
raise NotImplementedError(F"""{beta_schedule} does is not implemented for {self.__class__}""" )
_a : Optional[Any] = 1.0 - self.betas
_a : Optional[int] = torch.cumprod(self.alphas ,dim=0 )
# set all values
self.set_timesteps(_a ,_a ,_a )
_a : Optional[int] = use_karras_sigmas
def __lowercase ( self : Any ,_a : Union[str, Any] ,_a : Optional[Any]=None ):
'''simple docstring'''
if schedule_timesteps is None:
_a : List[Any] = self.timesteps
_a : Dict = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
_a : int = 1 if len(_a ) > 1 else 0
else:
_a : str = timestep.cpu().item() if torch.is_tensor(_a ) else timestep
_a : str = self._index_counter[timestep_int]
return indices[pos].item()
    @property
    def init_noise_sigma(self):
        """simple docstring"""
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(
        self,
        sample: torch.FloatTensor,
        timestep: Union[float, torch.FloatTensor],
    ) -> torch.FloatTensor:
        """simple docstring"""
        step_index = self.index_for_timestep(timestep)

        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, torch.device] = None,
        num_train_timesteps: Optional[int] = None,
    ):
        """simple docstring"""
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        log_sigmas = np.log(sigmas)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)

        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
            timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])

        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])

        timesteps = torch.from_numpy(timesteps)
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])

        if str(device).startswith("mps"):
            # mps does not support float64
            self.timesteps = timesteps.to(device, dtype=torch.float32)
        else:
            self.timesteps = timesteps.to(device=device)

        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
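    # Clarifying note (added, not from the original file): the repeat_interleave
    # above duplicates every interior sigma/timestep because Heun's method
    # evaluates the model twice per step -- once for the Euler predictor and once
    # for the corrector -- so the schedule must expose each point twice.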
    def _sigma_to_t(self, sigma, log_sigmas):
        """simple docstring"""
        # get log sigma
        log_sigma = np.log(sigma)

        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]

        # get sigmas range
        low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w, 0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape)
        return t
    def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps) -> torch.FloatTensor:
        """simple docstring"""
        sigma_min: float = in_sigmas[-1].item()
        sigma_max: float = in_sigmas[0].item()

        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0, 1, num_inference_steps)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas
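    # Background note (added for clarity): this is the sigma spacing from Karras
    # et al. 2022, "Elucidating the Design Space of Diffusion-Based Generative
    # Models" (https://arxiv.org/abs/2206.00364), i.e.
    #   sigma_i = (sigma_max^(1/rho) + i/(n-1) * (sigma_min^(1/rho) - sigma_max^(1/rho)))^rho
    # with rho = 7, which concentrates sampling steps at low noise levels.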
    @property
    def state_in_first_order(self):
        """simple docstring"""
        return self.dt is None
    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        """simple docstring"""
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction`"
            )

        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2

            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample

            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
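    # Summary note (added for clarity): each Heun step spans two calls to `step`.
    # The first call does an Euler move from sigma to sigma_next and caches
    # (derivative, dt, sample); the second call re-estimates the slope at
    # sigma_next, averages it with the cached slope, and replays the move from
    # the cached sample -- the classic predictor-corrector form of Heun's method.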
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        """simple docstring"""
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        """simple docstring"""
        return self.config.num_train_timesteps
| 5 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    """simple docstring"""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'vit.embeddings.cls_token'),
('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """simple docstring"""
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
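# Clarifying note (added): timm stores query, key and value as one fused `qkv`
# projection of shape (3 * hidden_size, hidden_size); the slices above peel off
# the first, middle and last `hidden_size` rows to recover the separate q/k/v
# weights that the Hugging Face ViT layout expects.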
def remove_classification_head_(state_dict):
    """simple docstring"""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    """simple docstring"""
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    """simple docstring"""
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6

    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
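# Example invocation (illustrative; the script filename is an assumption, not
# given in this file):
#   python convert_vit_dino_to_pytorch.py --model_name dino_vitb16 \
#       --pytorch_dump_folder_path ./dino_vitb16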
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""dino_vitb16""",
type=str,
        help="""Name of the model trained with DINO you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--base_model""",
action="""store_true""",
help="""Whether to only convert the base model (no projection head weights).""",
)
parser.set_defaults(base_model=True)
    args = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 366 |
'''simple docstring'''
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int):
    """simple docstring"""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
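# Expected behaviour (note added for clarity): no gates are applied before the
# measurement, so the qubit stays in |0> and every shot reads "0"; the printed
# histogram should therefore be {'0': 1000}.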
if __name__ == "__main__":
print(f'''Total count for various states are: {single_qubit_measure(1, 1)}''')
| 5 | 0 |
'''simple docstring'''
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
__lowerCAmelCase = logging.get_logger(__name__)
class RagTokenizer:
    """simple docstring"""
    def __init__(self, question_encoder, generator):
        """simple docstring"""
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        """simple docstring"""
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """simple docstring"""
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)

        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)

        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        """simple docstring"""
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        """simple docstring"""
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """simple docstring"""
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        """simple docstring"""
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        """simple docstring"""
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        tgt_texts: Optional[List[str]] = None,
        max_length: Optional[int] = None,
        max_target_length: Optional[int] = None,
        padding: str = "longest",
        return_tensors: str = None,
        truncation: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        """simple docstring"""
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
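# Illustrative usage (the checkpoint name is an assumption, not from this file):
#   tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-base")
#   inputs = tokenizer("who holds the record in 100m freestyle", return_tensors="pt")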
| 367 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
"""simple docstring"""
    def tearDown(self):
        """simple docstring"""
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_inpaint_pipeline(self):
        """simple docstring"""
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "xvjiarui/stable-diffusion-2-inpainting"
        pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        init_image = num_samples * [init_image]
        mask_image = num_samples * [mask_image]
        prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(prompt, init_image, mask_image)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        prompt_ids = shard(prompt_ids)
        processed_masked_images = shard(processed_masked_images)
        processed_masks = shard(processed_masks)

        output = pipeline(
            prompt_ids, processed_masked_images, processed_masks, params, prng_seed, num_inference_steps, jit=True
        )

        images = output.images.reshape(num_samples, 512, 512, 3)

        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 5 | 0 |
'''simple docstring'''
def hamming_distance(string1: str, string2: str) -> int:
    """
    Calculate the Hamming distance between two equal-length strings.

    >>> hamming_distance("python", "python")
    0
    >>> hamming_distance("karolin", "kathrin")
    3
    """
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")
    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 368 |
'''simple docstring'''
def match_pattern(input_string: str, pattern: str) -> bool:
    """
    Return True if `input_string` matches `pattern`, where `.` matches any
    single character and `*` matches zero or more of the preceding element.

    >>> match_pattern("aab", "c*a*b")
    True
    >>> match_pattern("aaa", "a.a")
    True
    """
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1

    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]

    # since string of zero length match pattern of zero length
    dp[0][0] = 1

    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0

    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0

    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]

            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0

    return bool(dp[-1][-1])
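# Worked example (note added for clarity): for input "aab" and pattern "c*a*b",
# "c*" matches zero 'c's (dp[0][2] = 1 via dp[0][j - 2]), "a*" then absorbs both
# 'a's through the dp[i - 1][j] branch, and the final 'b' matches literally, so
# dp[-1][-1] == 1 and the function returns True.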
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
    input_string = "aab"
    pattern = "c*a*b"
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(f'''{input_string} matches the given pattern {pattern}''')
else:
print(f'''{input_string} does not match with the given pattern {pattern}''')
| 5 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_unispeech"] = [
"""UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""UniSpeechForCTC""",
"""UniSpeechForPreTraining""",
"""UniSpeechForSequenceClassification""",
"""UniSpeechModel""",
"""UniSpeechPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 369 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        """simple docstring"""
        super().setUp()

        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        """simple docstring"""
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        """simple docstring"""
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer(self):
        """simple docstring"""
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]

        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        """simple docstring"""
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        """simple docstring"""
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]
        assert encoded[-1] == encoded_dot[0]
| 5 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n >>> repo = \"openai/shap-e-img2img\"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n >>> image = load_image(image_url).convert(\"RGB\")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n ```\n"""
@dataclass
class ShapEPipelineOutput(BaseOutput):
    """simple docstring"""

    images: Union[PIL.Image.Image, np.ndarray]


class ShapEImg2ImgPipeline(DiffusionPipeline):
    """simple docstring"""
    def __init__(
        self,
        prior: PriorTransformer,
        image_encoder: CLIPVisionModel,
        image_processor: CLIPImageProcessor,
        scheduler: HeunDiscreteScheduler,
        renderer: ShapERenderer,
    ):
        """simple docstring"""
        super().__init__()

        self.register_modules(
            prior=prior,
            image_encoder=image_encoder,
            image_processor=image_processor,
            scheduler=scheduler,
            renderer=renderer,
        )

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        """simple docstring"""
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        """simple docstring"""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    @property
    def _execution_device(self):
        """simple docstring"""
        if self.device != torch.device("meta") or not hasattr(self.image_encoder, "_hf_hook"):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
        """simple docstring"""
        if isinstance(image, list) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)

        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0)

        image = image.to(dtype=self.image_encoder.dtype, device=device)

        image_embeds = self.image_encoder(image)["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256
        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])

        return image_embeds
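    # Clarifying note (added): batching the zero (unconditional) embeddings with
    # the real image embeddings lets a single prior forward pass produce both
    # predictions needed for classifier-free guidance; they are split apart
    # again with `chunk(2)` inside the denoising loop below.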
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image: Union[PIL.Image.Image, List[PIL.Image.Image]],
        num_images_per_prompt: int = 1,
        num_inference_steps: int = 25,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        guidance_scale: float = 4.0,
        frame_size: int = 64,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        """simple docstring"""
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}"
            )

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)

        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim

        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            noise_pred = self.prior(
                scaled_model_input,
                timestep=t,
                proj_embedding=image_embeds,
            ).predicted_image_embedding

            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2
            )  # batch_size, num_embeddings, embedding_dim

            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)

            latents = self.scheduler.step(
                noise_pred,
                timestep=t,
                sample=latents,
            ).prev_sample

        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)

        images = []
        for i, latent in enumerate(latents):
            image = self.renderer.decode(
                latent[None, :],
                device,
                size=frame_size,
                ray_batch_size=4096,
                n_coarse_samples=64,
                n_fine_samples=128,
            )
            images.append(image)

        images = torch.stack(images)

        if output_type not in ["np", "pil"]:
            raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}")

        images = images.cpu().numpy()

        if output_type == "pil":
            images = [self.numpy_to_pil(img) for img in images]

        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        if not return_dict:
            return (images,)

        return ShapEPipelineOutput(images=images)
| 370 |
'''simple docstring'''
# fmt: off
MORSE_CODE_DICT = {
"""A""": """.-""", """B""": """-...""", """C""": """-.-.""", """D""": """-..""", """E""": """.""", """F""": """..-.""", """G""": """--.""",
"""H""": """....""", """I""": """..""", """J""": """.---""", """K""": """-.-""", """L""": """.-..""", """M""": """--""", """N""": """-.""",
"""O""": """---""", """P""": """.--.""", """Q""": """--.-""", """R""": """.-.""", """S""": """...""", """T""": """-""", """U""": """..-""",
"""V""": """...-""", """W""": """.--""", """X""": """-..-""", """Y""": """-.--""", """Z""": """--..""", """1""": """.----""",
"""2""": """..---""", """3""": """...--""", """4""": """....-""", """5""": """.....""", """6""": """-....""", """7""": """--...""",
"""8""": """---..""", """9""": """----.""", """0""": """-----""", """&""": """.-...""", """@""": """.--.-.""",
""":""": """---...""", """,""": """--..--""", """.""": """.-.-.-""", """'""": """.----.""", """\"""": """.-..-.""",
"""?""": """..--..""", """/""": """-..-.""", """=""": """-...-""", """+""": """.-.-.""", """-""": """-....-""",
"""(""": """-.--.""", """)""": """-.--.-""", """!""": """-.-.--""", """ """: """/"""
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt(message: str) -> str:
    """simple docstring"""
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    """simple docstring"""
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    """simple docstring"""
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)
if __name__ == "__main__":
main()
| 5 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_beit"] = ["BeitFeatureExtractor"]
    _import_structure["image_processing_beit"] = ["BeitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_beit"] = [
"""BEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BeitForImageClassification""",
"""BeitForMaskedImageModeling""",
"""BeitForSemanticSegmentation""",
"""BeitModel""",
"""BeitPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_beit"] = [
"""FlaxBeitForImageClassification""",
"""FlaxBeitForMaskedImageModeling""",
"""FlaxBeitModel""",
"""FlaxBeitPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 371 |
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
"""return_dict""": False,
"""output_hidden_states""": True,
"""output_attentions""": True,
"""torchscript""": True,
"""torch_dtype""": """float16""",
"""use_bfloat16""": True,
"""tf_legacy_loss""": True,
"""pruned_heads""": {"""a""": 1},
"""tie_word_embeddings""": False,
"""is_decoder""": True,
"""cross_attention_hidden_size""": 1_2_8,
"""add_cross_attention""": True,
"""tie_encoder_decoder""": True,
"""max_length""": 5_0,
"""min_length""": 3,
"""do_sample""": True,
"""early_stopping""": True,
"""num_beams""": 3,
"""num_beam_groups""": 3,
"""diversity_penalty""": 0.5,
"""temperature""": 2.0,
"""top_k""": 1_0,
"""top_p""": 0.7,
"""typical_p""": 0.2,
"""repetition_penalty""": 0.8,
"""length_penalty""": 0.8,
"""no_repeat_ngram_size""": 5,
"""encoder_no_repeat_ngram_size""": 5,
"""bad_words_ids""": [1, 2, 3],
"""num_return_sequences""": 3,
"""chunk_size_feed_forward""": 5,
"""output_scores""": True,
"""return_dict_in_generate""": True,
"""forced_bos_token_id""": 2,
"""forced_eos_token_id""": 3,
"""remove_invalid_values""": True,
"""architectures""": ["""BertModel"""],
"""finetuning_task""": """translation""",
"""id2label""": {0: """label"""},
"""label2id""": {"""label""": """0"""},
"""tokenizer_class""": """BertTokenizerFast""",
"""prefix""": """prefix""",
"""bos_token_id""": 6,
"""pad_token_id""": 7,
"""eos_token_id""": 8,
"""sep_token_id""": 9,
"""decoder_start_token_id""": 1_0,
"""exponential_decay_length_penalty""": (5, 1.01),
"""suppress_tokens""": [0, 1],
"""begin_suppress_tokens""": 2,
"""task_specific_params""": {"""translation""": """some_params"""},
"""problem_type""": """regression""",
}
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
"""simple docstring"""
    @classmethod
    def setUpClass(cls):
        """simple docstring"""
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        """simple docstring"""
        try:
            delete_repo(token=cls._token, repo_id="test-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-config-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-config")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        """simple docstring"""
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("test-config", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id="test-config", push_to_hub=True, use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_in_organization(self):
        """simple docstring"""
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("valid_org/test-config-org", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_dynamic_config(self):
        """simple docstring"""
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)

        config.push_to_hub("test-dynamic-config", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig"})

        new_config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config", trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, "CustomConfig")
        self.assertEqual(new_config.attribute, 42)
class ConfigTestUtils(unittest.TestCase):
"""simple docstring"""
    def test_config_from_string(self):
        """simple docstring"""
        c = GPT2Config()

        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + "foo"  # str
        c.update_from_string(
            f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}"
        )
        self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd")
        self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop")
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights")
        self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type")
    def test_config_common_kwargs_is_complete(self):
        """simple docstring"""
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"]
        )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                "The following keys are set with the default values in"
                " `test_configuration_common.config_common_kwargs` pick another value for them:"
                f" {', '.join(keys_with_defaults)}."
            )
    def test_from_pretrained_subfolder(self):
        """simple docstring"""
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder")

        config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder", subfolder="bert")

        self.assertIsNotNone(config)
    def test_cached_files_are_used_when_internet_is_down(self):
        """simple docstring"""
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_url(self):
        """simple docstring"""
        # This test is for deprecated behavior and can be removed in v5
        _ = BertConfig.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json"
        )
    def test_local_versioning(self):
        """simple docstring"""
        configuration = AutoConfig.from_pretrained("bert-base-cased")
        configuration.configuration_files = ["config.4.0.0.json"]

        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, "config.4.0.0.json"), "w"))

            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)

            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ["config.42.0.0.json"]
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json"))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)
    def test_repo_versioning_before(self):
        """simple docstring"""
        repo = "hf-internal-testing/test-two-configs"

        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = "v4.0.0"
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True
        )
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})

        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = "v3.0.0"
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
| 5 | 0 |
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
"""google/mobilenet_v1_1.0_224""",
"""google/mobilenet_v1_0.75_192""",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    tf_to_pt_map = {}

    if isinstance(model, MobileNetVaForImageClassification):
        backbone = model.mobilenet_v1
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetVaForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map
def load_tf_weights_in_mobilenet_va(model, config, tf_checkpoint_path):
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
    """
    Apply TensorFlow-style "SAME" padding to a convolution layer.
    """
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
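# Worked example (note added for clarity): with an 8x8 input, stride 2 and a
# 3x3 kernel, 8 % 2 == 0 so pad_along_height = max(3 - 2, 0) = 1, split as 0 on
# top and 1 on the bottom -- reproducing TensorFlow's asymmetric "SAME" padding,
# which PyTorch's symmetric `padding=` argument cannot express on its own.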
class MobileNetVaConvLayer(nn.Module):
    """simple docstring"""

    def __init__(
        self,
        config: MobileNetVaConfig,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: Optional[int] = 1,
        groups: Optional[int] = 1,
        bias: bool = False,
        use_normalization: Optional[bool] = True,
        use_activation: Optional[bool or str] = True,
    ):
        """simple docstring"""
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride,
            padding=padding, groups=groups, bias=bias, padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels, eps=config.layer_norm_eps, momentum=0.9997,
                affine=True, track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        """simple docstring"""
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetVaPreTrainedModel(PreTrainedModel):
    """simple docstring"""

    config_class = MobileNetVaConfig
    load_tf_weights = load_tf_weights_in_mobilenet_va
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None:
        """simple docstring"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
MOBILENET_V1_START_DOCSTRING = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
MOBILENET_V1_INPUTS_DOCSTRING = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaModel(MobileNetVaPreTrainedModel):
"""simple docstring"""
    def __init__(self, config: MobileNetVaConfig, add_pooling_layer: bool = True):
        """simple docstring"""
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetVaConvLayer(
            config, in_channels=config.num_channels, out_channels=out_channels, kernel_size=3, stride=2,
        )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels

            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            # depthwise 3x3 convolution followed by a pointwise 1x1 convolution
            self.layer.append(
                MobileNetVaConvLayer(
                    config, in_channels=in_channels, out_channels=in_channels,
                    kernel_size=3, stride=strides[i], groups=in_channels,
                )
            )
            self.layer.append(
                MobileNetVaConvLayer(config, in_channels=in_channels, out_channels=out_channels, kernel_size=1)
            )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()
def __lowercase ( self : str ,_a : Union[str, Any] ):
'''simple docstring'''
raise NotImplementedError
@add_start_docstrings_to_model_forward(_a )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC ,output_type=_a ,config_class=_CONFIG_FOR_DOC ,modality='vision' ,expected_output=_EXPECTED_OUTPUT_SHAPE ,)
def __lowercase ( self : Dict ,_a : Optional[torch.Tensor] = None ,_a : Optional[bool] = None ,_a : Optional[bool] = None ,):
'''simple docstring'''
_a : Optional[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_a : int = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('You have to specify pixel_values' )
_a : Tuple = self.conv_stem(_a )
_a : Optional[Any] = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
_a : Any = layer_module(_a )
if output_hidden_states:
_a : Union[str, Any] = all_hidden_states + (hidden_states,)
_a : List[Any] = hidden_states
if self.pooler is not None:
_a : Tuple = torch.flatten(self.pooler(_a ) ,start_dim=1 )
else:
_a : Any = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_a ,pooler_output=_a ,hidden_states=_a ,)
@add_start_docstrings(
'''
MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , __UpperCamelCase , )
class UpperCAmelCase__ ( __UpperCamelCase ):
"""simple docstring"""
def __init__( self : Optional[int] ,_a : MobileNetVaConfig ):
'''simple docstring'''
super().__init__(_a )
_a : List[str] = config.num_labels
_a : Dict = MobileNetVaModel(_a )
_a : Optional[int] = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
_a : str = nn.Dropout(config.classifier_dropout_prob ,inplace=_a )
_a : str = nn.Linear(_a ,config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_a )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=_a ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,)
def __lowercase ( self : Dict ,_a : Optional[torch.Tensor] = None ,_a : Optional[bool] = None ,_a : Optional[torch.Tensor] = None ,_a : Optional[bool] = None ,):
'''simple docstring'''
_a : List[Any] = return_dict if return_dict is not None else self.config.use_return_dict
_a : List[str] = self.mobilenet_va(_a ,output_hidden_states=_a ,return_dict=_a )
_a : Optional[Any] = outputs.pooler_output if return_dict else outputs[1]
_a : List[Any] = self.classifier(self.dropout(_a ) )
_a : Tuple = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_a : Dict = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_a : Tuple = 'single_label_classification'
else:
_a : Dict = 'multi_label_classification'
if self.config.problem_type == "regression":
_a : Union[str, Any] = MSELoss()
if self.num_labels == 1:
_a : int = loss_fct(logits.squeeze() ,labels.squeeze() )
else:
_a : Dict = loss_fct(_a ,_a )
elif self.config.problem_type == "single_label_classification":
_a : Optional[int] = CrossEntropyLoss()
_a : List[str] = loss_fct(logits.view(-1 ,self.num_labels ) ,labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_a : Union[str, Any] = BCEWithLogitsLoss()
_a : int = loss_fct(_a ,_a )
if not return_dict:
_a : Any = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=_a ,logits=_a ,hidden_states=outputs.hidden_states ,)
| 350 |
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
__lowerCAmelCase = {
"""169M""": 1_2,
"""430M""": 2_4,
"""1B5""": 2_4,
"""3B""": 3_2,
"""7B""": 3_2,
"""14B""": 4_0,
}
__lowerCAmelCase = {
"""169M""": 7_6_8,
"""430M""": 1_0_2_4,
"""1B5""": 2_0_4_8,
"""3B""": 2_5_6_0,
"""7B""": 4_0_9_6,
"""14B""": 5_1_2_0,
}
def UpperCAmelCase_ (__a : Dict ):
"""simple docstring"""
_a : List[Any] = list(state_dict.keys() )
for name in state_dict_keys:
_a : List[Any] = state_dict.pop(__a )
# emb -> embedding
if name.startswith('emb.' ):
_a : List[str] = name.replace('emb.' , 'embeddings.' )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith('blocks.0.ln0' ):
_a : Dict = name.replace('blocks.0.ln0' , 'blocks.0.pre_ln' )
# att -> attention
_a : int = re.sub(R'blocks\.(\d+)\.att' , R'blocks.\1.attention' , __a )
# ffn -> feed_forward
_a : str = re.sub(R'blocks\.(\d+)\.ffn' , R'blocks.\1.feed_forward' , __a )
# time_mix_k -> time_mix_key and reshape
if name.endswith('.time_mix_k' ):
_a : Any = name.replace('.time_mix_k' , '.time_mix_key' )
# time_mix_v -> time_mix_value and reshape
if name.endswith('.time_mix_v' ):
_a : int = name.replace('.time_mix_v' , '.time_mix_value' )
    # time_mix_r -> time_mix_receptance and reshape
if name.endswith('.time_mix_r' ):
_a : Tuple = name.replace('.time_mix_r' , '.time_mix_receptance' )
if name != "head.weight":
_a : Tuple = 'rwkv.' + name
_a : List[Any] = weight
return state_dict
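# Illustrative before/after for the renaming above (keys are examples only):
#   "emb.weight"              -> "rwkv.embeddings.weight"
#   "blocks.0.ln0.weight"     -> "rwkv.blocks.0.pre_ln.weight"
#   "blocks.2.att.time_mix_k" -> "rwkv.blocks.2.attention.time_mix_key"
#   "head.weight"             -> "head.weight"  (left untouched)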
def UpperCAmelCase_ (__a : Tuple , __a : Union[str, Any] , __a : List[str] , __a : str=None , __a : List[str]=None , __a : int=False , __a : int=None ):
"""simple docstring"""
if tokenizer_file is None:
print('No `--tokenizer_file` provided, we will use the default tokenizer.' )
_a : List[Any] = 5_0_2_7_7
_a : Optional[Any] = AutoTokenizer.from_pretrained('EleutherAI/gpt-neox-20b' )
else:
_a : Optional[Any] = PreTrainedTokenizerFast(tokenizer_file=__a )
_a : List[Any] = len(__a )
tokenizer.save_pretrained(__a )
# 2. Build the config
_a : List[str] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
_a : str = candidate
break
if size is None:
raise ValueError('Could not infer the size, please provide it with the `--size` argument.' )
if size not in possible_sizes:
raise ValueError(f"""`size` should be one of {possible_sizes}, got {size}.""" )
_a : str = RwkvConfig(
vocab_size=__a , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(__a )
# 3. Download model file then convert state_dict
_a : Tuple = hf_hub_download(__a , __a )
_a : Optional[int] = torch.load(__a , map_location='cpu' )
_a : Dict = convert_state_dict(__a )
# 4. Split in shards and save
_a, _a : List[Any] = shard_checkpoint(__a )
for shard_file, shard in shards.items():
torch.save(__a , os.path.join(__a , __a ) )
if index is not None:
_a : Dict = os.path.join(__a , __a )
# Save the index as well
with open(__a , 'w' , encoding='utf-8' ) as f:
_a : List[Any] = json.dumps(__a , indent=2 , sort_keys=__a ) + '\n'
f.write(__a )
    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
print(
        'Cleaning up shards. This may fail with an OOM error; if this is the case, don\'t worry, you still have converted the model.' )
_a : List[Any] = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
_a : Optional[Any] = torch.load(os.path.join(__a , __a ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(__a , __a ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError('Please provide a `model_name` to push the model to the Hub.' )
_a : List[str] = AutoModelForCausalLM.from_pretrained(__a )
model.push_to_hub(__a , max_shard_size='2GB' )
tokenizer.push_to_hub(__a )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--repo_id""", default=None, type=str, required=True, help="""Repo ID from which to pull the checkpoint."""
)
parser.add_argument(
"""--checkpoint_file""", default=None, type=str, required=True, help="""Name of the checkpoint file in the repo."""
)
parser.add_argument(
"""--output_dir""", default=None, type=str, required=True, help="""Where to save the converted model."""
)
parser.add_argument(
"""--tokenizer_file""",
default=None,
type=str,
help="""Path to the tokenizer file to use (if not provided, only the model is converted).""",
)
parser.add_argument(
"""--size""",
default=None,
type=str,
help="""Size of the model. Will be inferred from the `checkpoint_file` if not passed.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Push to the Hub the converted model.""",
)
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""Name of the pushed model on the Hub, including the username / organization.""",
)
__lowerCAmelCase = parser.parse_args()
    convert_rwkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
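# Example invocation (repo and file names are illustrative, not prescribed here):
#   python convert_rwkv_checkpoint_to_hf.py --repo_id BlinkDL/rwkv-4-pile-169m \
#       --checkpoint_file RWKV-4-Pile-169M.pth --output_dir ./rwkv-169m --size 169M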
| 5 | 0 |
'''simple docstring'''
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : str ,_a : Optional[Any] ,_a : Dict ,_a : List[Any] ,_a : Optional[Any]=0.0 ,_a : Any = None ,_a : Optional[Any] = "geglu" ,_a : Dict = None ,_a : Any = False ,_a : Tuple = False ,_a : List[str] = False ,_a : List[str] = False ,_a : int = True ,_a : List[str] = "layer_norm" ,_a : Dict = False ,):
'''simple docstring'''
super().__init__()
_a : Optional[int] = only_cross_attention
_a : Optional[int] = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm_zero'
_a : str = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm'
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
F"""`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"""
F""" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.""" )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
_a : str = AdaLayerNorm(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
elif self.use_ada_layer_norm_zero:
_a : List[str] = AdaLayerNormZero(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
else:
_a : str = nn.LayerNorm(__SCREAMING_SNAKE_CASE ,elementwise_affine=__SCREAMING_SNAKE_CASE )
_a : Union[str, Any] = Attention(
query_dim=__SCREAMING_SNAKE_CASE ,heads=__SCREAMING_SNAKE_CASE ,dim_head=__SCREAMING_SNAKE_CASE ,dropout=__SCREAMING_SNAKE_CASE ,bias=__SCREAMING_SNAKE_CASE ,cross_attention_dim=cross_attention_dim if only_cross_attention else None ,upcast_attention=__SCREAMING_SNAKE_CASE ,)
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self-attention, where there will only be one attention block,
            # i.e. the number of modulation chunks returned by AdaLayerNormZero would not make sense if it were
            # returned during the second (cross-attention) block.
_a : Optional[Any] = (
AdaLayerNorm(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
if self.use_ada_layer_norm
else nn.LayerNorm(__SCREAMING_SNAKE_CASE ,elementwise_affine=__SCREAMING_SNAKE_CASE )
)
_a : Union[str, Any] = Attention(
query_dim=__SCREAMING_SNAKE_CASE ,cross_attention_dim=cross_attention_dim if not double_self_attention else None ,heads=__SCREAMING_SNAKE_CASE ,dim_head=__SCREAMING_SNAKE_CASE ,dropout=__SCREAMING_SNAKE_CASE ,bias=__SCREAMING_SNAKE_CASE ,upcast_attention=__SCREAMING_SNAKE_CASE ,) # is self-attn if encoder_hidden_states is none
else:
_a : Union[str, Any] = None
_a : int = None
# 3. Feed-forward
_a : str = nn.LayerNorm(__SCREAMING_SNAKE_CASE ,elementwise_affine=__SCREAMING_SNAKE_CASE )
_a : int = FeedForward(__SCREAMING_SNAKE_CASE ,dropout=__SCREAMING_SNAKE_CASE ,activation_fn=__SCREAMING_SNAKE_CASE ,final_dropout=__SCREAMING_SNAKE_CASE )
# let chunk size default to None
_a : Optional[Any] = None
_a : Union[str, Any] = 0
def __lowercase ( self : Optional[int] ,_a : str ,_a : List[Any] ):
'''simple docstring'''
_a : int = chunk_size
_a : Dict = dim
def __lowercase ( self : int ,_a : Dict ,_a : Optional[Any] = None ,_a : Dict = None ,_a : List[str] = None ,_a : int = None ,_a : Optional[Any] = None ,_a : Optional[Any] = None ,):
'''simple docstring'''
if self.use_ada_layer_norm:
_a : Any = self.norma(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
elif self.use_ada_layer_norm_zero:
_a, _a, _a, _a, _a : Union[str, Any] = self.norma(
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,hidden_dtype=hidden_states.dtype )
else:
_a : List[str] = self.norma(__SCREAMING_SNAKE_CASE )
_a : Optional[Any] = cross_attention_kwargs if cross_attention_kwargs is not None else {}
_a : Tuple = self.attna(
__SCREAMING_SNAKE_CASE ,encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None ,attention_mask=__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE ,)
if self.use_ada_layer_norm_zero:
_a : List[Any] = gate_msa.unsqueeze(1 ) * attn_output
_a : str = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
_a : Any = (
self.norma(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ) if self.use_ada_layer_norm else self.norma(__SCREAMING_SNAKE_CASE )
)
_a : Optional[Any] = self.attna(
__SCREAMING_SNAKE_CASE ,encoder_hidden_states=__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE ,)
_a : Any = attn_output + hidden_states
# 3. Feed-forward
_a : Any = self.norma(__SCREAMING_SNAKE_CASE )
if self.use_ada_layer_norm_zero:
_a : Dict = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
F"""`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.""" )
_a : Union[str, Any] = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
_a : Optional[Any] = torch.cat(
[self.ff(__SCREAMING_SNAKE_CASE ) for hid_slice in norm_hidden_states.chunk(__SCREAMING_SNAKE_CASE ,dim=self._chunk_dim )] ,dim=self._chunk_dim ,)
else:
_a : int = self.ff(__SCREAMING_SNAKE_CASE )
if self.use_ada_layer_norm_zero:
_a : Tuple = gate_mlp.unsqueeze(1 ) * ff_output
_a : Tuple = ff_output + hidden_states
return hidden_states
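# A standalone sketch of the chunking trick used above: splitting the chunked
# dimension before the feed-forward lowers peak activation memory at the cost
# of extra kernel launches (helper name and defaults are ours):
def _chunked_feed_forward_sketch(ff, hidden_states: torch.Tensor, chunk_size: int, chunk_dim: int = 1) -> torch.Tensor:
    if hidden_states.shape[chunk_dim] % chunk_size != 0:
        raise ValueError("the chunked dimension must be divisible by chunk_size")
    num_chunks = hidden_states.shape[chunk_dim] // chunk_size
    return torch.cat(
        [ff(chunk) for chunk in hidden_states.chunk(num_chunks, dim=chunk_dim)], dim=chunk_dim
    )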
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : List[str] ,_a : Optional[int] ,_a : Union[str, Any] = None ,_a : Any = 4 ,_a : str = 0.0 ,_a : Optional[Any] = "geglu" ,_a : int = False ,):
'''simple docstring'''
super().__init__()
_a : Tuple = int(dim * mult )
_a : Dict = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
_a : Dict = GELU(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
if activation_fn == "gelu-approximate":
_a : Optional[Any] = GELU(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,approximate='tanh' )
elif activation_fn == "geglu":
_a : Optional[int] = GEGLU(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
elif activation_fn == "geglu-approximate":
_a : Union[str, Any] = ApproximateGELU(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
_a : Dict = nn.ModuleList([] )
# project in
self.net.append(__SCREAMING_SNAKE_CASE )
# project dropout
self.net.append(nn.Dropout(__SCREAMING_SNAKE_CASE ) )
# project out
self.net.append(nn.Linear(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ) )
        # The FF used in Vision Transformer, MLP-Mixer, etc. has a final dropout
if final_dropout:
self.net.append(nn.Dropout(__SCREAMING_SNAKE_CASE ) )
def __lowercase ( self : Dict ,_a : int ):
'''simple docstring'''
for module in self.net:
_a : List[str] = module(__SCREAMING_SNAKE_CASE )
return hidden_states
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Optional[Any] ,_a : Optional[int] ,_a : List[Any] ,_a : Tuple = "none" ):
'''simple docstring'''
super().__init__()
_a : Optional[Any] = nn.Linear(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
_a : Any = approximate
def __lowercase ( self : str ,_a : Optional[int] ):
'''simple docstring'''
if gate.device.type != "mps":
return F.gelu(__SCREAMING_SNAKE_CASE ,approximate=self.approximate )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) ,approximate=self.approximate ).to(dtype=gate.dtype )
def __lowercase ( self : Tuple ,_a : Any ):
'''simple docstring'''
_a : Optional[int] = self.proj(__SCREAMING_SNAKE_CASE )
_a : Union[str, Any] = self.gelu(__SCREAMING_SNAKE_CASE )
return hidden_states
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : int ,_a : Optional[Any] ,_a : Tuple ):
'''simple docstring'''
super().__init__()
_a : Any = nn.Linear(__SCREAMING_SNAKE_CASE ,dim_out * 2 )
def __lowercase ( self : Dict ,_a : int ):
'''simple docstring'''
if gate.device.type != "mps":
return F.gelu(__SCREAMING_SNAKE_CASE )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype )
def __lowercase ( self : int ,_a : Optional[int] ):
'''simple docstring'''
_a, _a : Union[str, Any] = self.proj(__SCREAMING_SNAKE_CASE ).chunk(2 ,dim=-1 )
return hidden_states * self.gelu(__SCREAMING_SNAKE_CASE )
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : str ,_a : int ,_a : Any ):
'''simple docstring'''
super().__init__()
_a : int = nn.Linear(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
def __lowercase ( self : Tuple ,_a : Tuple ):
'''simple docstring'''
_a : List[Any] = self.proj(__SCREAMING_SNAKE_CASE )
return x * torch.sigmoid(1.702 * x )
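# The 1.702 constant is the standard sigmoid approximation of GELU; a quick
# sanity check (the tolerance is a loose, illustrative bound — the true max
# error is roughly 0.02):
#
#   x = torch.linspace(-4, 4, steps=101)
#   assert (x * torch.sigmoid(1.702 * x) - F.gelu(x)).abs().max() < 0.03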
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : int ,_a : Dict ,_a : Any ):
'''simple docstring'''
super().__init__()
_a : List[str] = nn.Embedding(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
_a : Optional[int] = nn.SiLU()
_a : Optional[Any] = nn.Linear(__SCREAMING_SNAKE_CASE ,embedding_dim * 2 )
_a : List[str] = nn.LayerNorm(__SCREAMING_SNAKE_CASE ,elementwise_affine=__SCREAMING_SNAKE_CASE )
def __lowercase ( self : List[str] ,_a : Optional[int] ,_a : List[str] ):
'''simple docstring'''
_a : Tuple = self.linear(self.silu(self.emb(__SCREAMING_SNAKE_CASE ) ) )
_a, _a : int = torch.chunk(__SCREAMING_SNAKE_CASE ,2 )
_a : Any = self.norm(__SCREAMING_SNAKE_CASE ) * (1 + scale) + shift
return x
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Dict ,_a : List[Any] ,_a : str ):
'''simple docstring'''
super().__init__()
_a : Any = CombinedTimestepLabelEmbeddings(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
_a : int = nn.SiLU()
_a : int = nn.Linear(__SCREAMING_SNAKE_CASE ,6 * embedding_dim ,bias=__SCREAMING_SNAKE_CASE )
_a : Optional[Any] = nn.LayerNorm(__SCREAMING_SNAKE_CASE ,elementwise_affine=__SCREAMING_SNAKE_CASE ,eps=1E-6 )
def __lowercase ( self : Union[str, Any] ,_a : Union[str, Any] ,_a : Union[str, Any] ,_a : List[str] ,_a : Optional[int]=None ):
'''simple docstring'''
_a : Union[str, Any] = self.linear(self.silu(self.emb(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,hidden_dtype=__SCREAMING_SNAKE_CASE ) ) )
_a, _a, _a, _a, _a, _a : Optional[Any] = emb.chunk(6 ,dim=1 )
_a : Tuple = self.norm(__SCREAMING_SNAKE_CASE ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Any ,_a : Dict ,_a : Optional[Any] ,_a : Union[str, Any] ,_a : Union[str, Any] = None ,_a : str = 1E-5 ):
'''simple docstring'''
super().__init__()
_a : Optional[Any] = num_groups
_a : Optional[int] = eps
if act_fn is None:
_a : Optional[int] = None
else:
_a : Union[str, Any] = get_activation(__SCREAMING_SNAKE_CASE )
_a : int = nn.Linear(__SCREAMING_SNAKE_CASE ,out_dim * 2 )
def __lowercase ( self : List[str] ,_a : Optional[Any] ,_a : Optional[int] ):
'''simple docstring'''
if self.act:
_a : Tuple = self.act(__SCREAMING_SNAKE_CASE )
_a : int = self.linear(__SCREAMING_SNAKE_CASE )
_a : Optional[Any] = emb[:, :, None, None]
_a, _a : Dict = emb.chunk(2 ,dim=1 )
_a : Any = F.group_norm(__SCREAMING_SNAKE_CASE ,self.num_groups ,eps=self.eps )
_a : Optional[int] = x * (1 + scale) + shift
return x
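# All of the Ada* layers above share the same FiLM-style pattern: an embedding
# is projected to per-channel (shift, scale) pairs and applied as
# `norm(x) * (1 + scale) + shift`, so a zero-initialized projection reduces to
# a plain normalization layer.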
| 351 |
'''simple docstring'''
import comet # From: unbabel-comet
import torch
import datasets
__lowerCAmelCase = datasets.logging.get_logger(__name__)
__lowerCAmelCase = """\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel's Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = \"{COMET}: A Neural Framework for {MT} Evaluation\",
author = \"Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon\",
booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",
month = nov,
year = \"2020\",
address = \"Online\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",
pages = \"2685--2702\",
}
"""
__lowerCAmelCase = """\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DAs, or MQM).
With the release of the framework, the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task, achieving SOTA in that year's competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
"""
__lowerCAmelCase = """
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): Candidate translations
`references` (list of str): Reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric('comet')
>>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use
>>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]
>>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]
>>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results[\"scores\"]])
[0.19, 0.92]
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase__ ( datasets.Metric ):
"""simple docstring"""
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,homepage='https://unbabel.github.io/COMET/html/index.html' ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'sources': datasets.Value('string' ,id='sequence' ),
'predictions': datasets.Value('string' ,id='sequence' ),
'references': datasets.Value('string' ,id='sequence' ),
} ) ,codebase_urls=['https://github.com/Unbabel/COMET'] ,reference_urls=[
'https://github.com/Unbabel/COMET',
'https://www.aclweb.org/anthology/2020.emnlp-main.213/',
        'http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf',
] ,)
def __lowercase ( self : int ,_a : int ):
'''simple docstring'''
if self.config_name == "default":
_a : List[Any] = comet.load_from_checkpoint(comet.download_model('wmt20-comet-da' ) )
else:
_a : List[str] = comet.load_from_checkpoint(comet.download_model(self.config_name ) )
def __lowercase ( self : Tuple ,_a : List[Any] ,_a : Dict ,_a : Optional[Any] ,_a : List[str]=None ,_a : Tuple=False ):
'''simple docstring'''
if gpus is None:
_a : str = 1 if torch.cuda.is_available() else 0
_a : Optional[Any] = {'src': sources, 'mt': predictions, 'ref': references}
_a : Optional[Any] = [dict(zip(_a ,_a ) ) for t in zip(*data.values() )]
_a, _a : Tuple = self.scorer.predict(_a ,gpus=_a ,progress_bar=_a )
return {"mean_score": mean_score, "scores": scores}
| 5 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase = {
"""configuration_jukebox""": [
"""JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""JukeboxConfig""",
"""JukeboxPriorConfig""",
"""JukeboxVQVAEConfig""",
],
"""tokenization_jukebox""": ["""JukeboxTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
"""JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""JukeboxModel""",
"""JukeboxPreTrainedModel""",
"""JukeboxVQVAE""",
"""JukeboxPrior""",
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
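# Note: `_LazyModule` swaps itself into `sys.modules`, so the torch-backed
# classes listed above are only imported when first accessed, keeping
# `import transformers` cheap.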
| 352 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowerCAmelCase = {
"""configuration_squeezebert""": [
"""SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SqueezeBertConfig""",
"""SqueezeBertOnnxConfig""",
],
"""tokenization_squeezebert""": ["""SqueezeBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ["""SqueezeBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
"""SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SqueezeBertForMaskedLM""",
"""SqueezeBertForMultipleChoice""",
"""SqueezeBertForQuestionAnswering""",
"""SqueezeBertForSequenceClassification""",
"""SqueezeBertForTokenClassification""",
"""SqueezeBertModel""",
"""SqueezeBertModule""",
"""SqueezeBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 5 | 0 |
'''simple docstring'''
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class UpperCAmelCase__ :
"""simple docstring"""
@staticmethod
def __lowercase ( *_a : Dict ,**_a : List[Any] ):
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@require_torch
def __lowercase ( self : int ):
'''simple docstring'''
_a : List[Any] = pipeline(
model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' ,)
_a : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
_a : Any = image_classifier(_lowerCAmelCase ,candidate_labels=['a', 'b', 'c'] )
        # The floating-point scores are so close that we hit precision limits, so the order is not guaranteed
        # across Python and torch versions.
self.assertIn(
nested_simplify(_lowerCAmelCase ) ,[
[{'score': 0.333, 'label': 'a'}, {'score': 0.333, 'label': 'b'}, {'score': 0.333, 'label': 'c'}],
[{'score': 0.333, 'label': 'a'}, {'score': 0.333, 'label': 'c'}, {'score': 0.333, 'label': 'b'}],
] ,)
_a : Any = image_classifier([image] * 5 ,candidate_labels=['A', 'B', 'C'] ,batch_size=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) ,[
[
{'score': 0.333, 'label': ANY(_lowerCAmelCase )},
{'score': 0.333, 'label': ANY(_lowerCAmelCase )},
{'score': 0.333, 'label': ANY(_lowerCAmelCase )},
],
[
{'score': 0.333, 'label': ANY(_lowerCAmelCase )},
{'score': 0.333, 'label': ANY(_lowerCAmelCase )},
{'score': 0.333, 'label': ANY(_lowerCAmelCase )},
],
[
{'score': 0.333, 'label': ANY(_lowerCAmelCase )},
{'score': 0.333, 'label': ANY(_lowerCAmelCase )},
{'score': 0.333, 'label': ANY(_lowerCAmelCase )},
],
[
{'score': 0.333, 'label': ANY(_lowerCAmelCase )},
{'score': 0.333, 'label': ANY(_lowerCAmelCase )},
{'score': 0.333, 'label': ANY(_lowerCAmelCase )},
],
[
{'score': 0.333, 'label': ANY(_lowerCAmelCase )},
{'score': 0.333, 'label': ANY(_lowerCAmelCase )},
{'score': 0.333, 'label': ANY(_lowerCAmelCase )},
],
] ,)
@require_tf
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : int = pipeline(
model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' ,framework='tf' )
_a : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
_a : Union[str, Any] = image_classifier(_lowerCAmelCase ,candidate_labels=['a', 'b', 'c'] )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) ,[{'score': 0.333, 'label': 'a'}, {'score': 0.333, 'label': 'b'}, {'score': 0.333, 'label': 'c'}] ,)
_a : str = image_classifier([image] * 5 ,candidate_labels=['A', 'B', 'C'] ,batch_size=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) ,[
[
{'score': 0.333, 'label': ANY(_lowerCAmelCase )},
{'score': 0.333, 'label': ANY(_lowerCAmelCase )},
{'score': 0.333, 'label': ANY(_lowerCAmelCase )},
],
[
{'score': 0.333, 'label': ANY(_lowerCAmelCase )},
{'score': 0.333, 'label': ANY(_lowerCAmelCase )},
{'score': 0.333, 'label': ANY(_lowerCAmelCase )},
],
[
{'score': 0.333, 'label': ANY(_lowerCAmelCase )},
{'score': 0.333, 'label': ANY(_lowerCAmelCase )},
{'score': 0.333, 'label': ANY(_lowerCAmelCase )},
],
[
{'score': 0.333, 'label': ANY(_lowerCAmelCase )},
{'score': 0.333, 'label': ANY(_lowerCAmelCase )},
{'score': 0.333, 'label': ANY(_lowerCAmelCase )},
],
[
{'score': 0.333, 'label': ANY(_lowerCAmelCase )},
{'score': 0.333, 'label': ANY(_lowerCAmelCase )},
{'score': 0.333, 'label': ANY(_lowerCAmelCase )},
],
] ,)
@slow
@require_torch
def __lowercase ( self : str ):
'''simple docstring'''
_a : Union[str, Any] = pipeline(
task='zero-shot-image-classification' ,model='openai/clip-vit-base-patch32' ,)
# This is an image of 2 cats with remotes and no planes
_a : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
_a : Tuple = image_classifier(_lowerCAmelCase ,candidate_labels=['cat', 'plane', 'remote'] )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) ,[
{'score': 0.511, 'label': 'remote'},
{'score': 0.485, 'label': 'cat'},
{'score': 0.004, 'label': 'plane'},
] ,)
_a : Any = image_classifier([image] * 5 ,candidate_labels=['cat', 'plane', 'remote'] ,batch_size=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) ,[
[
{'score': 0.511, 'label': 'remote'},
{'score': 0.485, 'label': 'cat'},
{'score': 0.004, 'label': 'plane'},
],
]
* 5 ,)
@slow
@require_tf
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : Optional[Any] = pipeline(
task='zero-shot-image-classification' ,model='openai/clip-vit-base-patch32' ,framework='tf' )
# This is an image of 2 cats with remotes and no planes
_a : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
_a : Union[str, Any] = image_classifier(_lowerCAmelCase ,candidate_labels=['cat', 'plane', 'remote'] )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) ,[
{'score': 0.511, 'label': 'remote'},
{'score': 0.485, 'label': 'cat'},
{'score': 0.004, 'label': 'plane'},
] ,)
_a : List[Any] = image_classifier([image] * 5 ,candidate_labels=['cat', 'plane', 'remote'] ,batch_size=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) ,[
[
{'score': 0.511, 'label': 'remote'},
{'score': 0.485, 'label': 'cat'},
{'score': 0.004, 'label': 'plane'},
],
]
* 5 ,)
| 353 |
'''simple docstring'''
import sys
def matrix_chain_order(array):
    """simple docstring"""
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]
    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol
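# Worked example (the classic CLRS instance): for dims [30, 35, 15, 5, 10, 20, 25]
# (matrices A1..A6), the DP yields 15125 scalar multiplications with
# parenthesization ((A1 (A2 A3)) ((A4 A5) A6)).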
def print_optimal_solution(optimal_solution, i, j):
    """simple docstring"""
    if i == j:
        print('A' + str(i), end=' ')
    else:
        print('(', end=' ')
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(')', end=' ')
def main():
    """simple docstring"""
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)
    print('No. of operations required: ' + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)
if __name__ == "__main__":
main()
| 5 | 0 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : List[str] ,_a : Union[str, Any] ,_a : Dict=100 ,_a : Union[str, Any]=13 ,_a : Optional[int]=30 ,_a : Optional[Any]=2 ,_a : Optional[int]=3 ,_a : Optional[int]=True ,_a : Optional[Any]=True ,_a : List[str]=32 ,_a : Dict=5 ,_a : str=4 ,_a : List[str]=37 ,_a : Any="gelu" ,_a : Optional[int]=0.1 ,_a : List[str]=0.1 ,_a : List[str]=10 ,_a : Optional[int]=0.02 ,_a : Optional[Any]=3 ,):
'''simple docstring'''
_a : List[str] = parent
_a : str = vocab_size
_a : List[Any] = batch_size
_a : List[str] = image_size
_a : Union[str, Any] = patch_size
_a : int = num_channels
_a : Optional[Any] = is_training
_a : str = use_labels
_a : str = hidden_size
_a : List[Any] = num_hidden_layers
_a : List[Any] = num_attention_heads
_a : Optional[Any] = intermediate_size
_a : List[str] = hidden_act
_a : List[Any] = hidden_dropout_prob
_a : Optional[Any] = attention_probs_dropout_prob
_a : Union[str, Any] = type_sequence_label_size
_a : Union[str, Any] = initializer_range
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_a : Dict = (image_size // patch_size) ** 2
_a : Union[str, Any] = num_patches + 1
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : Optional[int] = None
if self.use_labels:
_a : Any = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_a : List[str] = BeitConfig(
vocab_size=self.vocab_size ,image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=_SCREAMING_SNAKE_CASE ,initializer_range=self.initializer_range ,)
return config, pixel_values, labels
def __lowercase ( self : str ,_a : List[Any] ,_a : Optional[Any] ,_a : Optional[Any] ):
'''simple docstring'''
_a : Union[str, Any] = FlaxBeitModel(config=_SCREAMING_SNAKE_CASE )
_a : Union[str, Any] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def __lowercase ( self : Union[str, Any] ,_a : Dict ,_a : int ,_a : Optional[int] ):
'''simple docstring'''
_a : Tuple = FlaxBeitForMaskedImageModeling(config=_SCREAMING_SNAKE_CASE )
_a : Optional[Any] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length - 1, self.vocab_size) )
def __lowercase ( self : List[str] ,_a : str ,_a : int ,_a : Optional[Any] ):
'''simple docstring'''
_a : Union[str, Any] = self.type_sequence_label_size
_a : Optional[Any] = FlaxBeitForImageClassification(config=_SCREAMING_SNAKE_CASE )
_a : Any = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_a : Union[str, Any] = 1
_a : List[str] = FlaxBeitForImageClassification(_SCREAMING_SNAKE_CASE )
_a : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_a : Optional[int] = model(_SCREAMING_SNAKE_CASE )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
_a : Dict = self.prepare_config_and_inputs()
        _a, _a, _a : Optional[int] = config_and_inputs
_a : Optional[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_flax
class UpperCAmelCase__ ( lowerCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = (
(FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
)
def __lowercase ( self : int ):
'''simple docstring'''
_a : Union[str, Any] = FlaxBeitModelTester(self )
_a : int = ConfigTester(self ,config_class=_SCREAMING_SNAKE_CASE ,has_text_modality=_SCREAMING_SNAKE_CASE ,hidden_size=37 )
def __lowercase ( self : List[str] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a, _a : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Dict = model_class(_SCREAMING_SNAKE_CASE )
_a : int = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : str = [*signature.parameters.keys()]
_a : List[str] = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_SCREAMING_SNAKE_CASE )
def __lowercase ( self : Any ):
'''simple docstring'''
_a, _a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_a : Dict = self._prepare_for_class(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
_a : Optional[int] = model_class(_SCREAMING_SNAKE_CASE )
@jax.jit
def model_jitted(_a : int ,**_a : Dict ):
return model(pixel_values=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
with self.subTest('JIT Enabled' ):
_a : Dict = model_jitted(**_SCREAMING_SNAKE_CASE ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
_a : Optional[Any] = model_jitted(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) ,len(_SCREAMING_SNAKE_CASE ) )
for jitted_output, output in zip(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
self.assertEqual(jitted_output.shape ,output.shape )
def __lowercase ( self : Any ):
'''simple docstring'''
_a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def __lowercase ( self : int ):
'''simple docstring'''
_a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_SCREAMING_SNAKE_CASE )
def __lowercase ( self : str ):
'''simple docstring'''
_a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_SCREAMING_SNAKE_CASE )
@slow
def __lowercase ( self : Any ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
_a : List[Any] = model_class_name.from_pretrained('microsoft/beit-base-patch16-224' )
_a : Optional[Any] = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def UpperCAmelCase_ ():
"""simple docstring"""
_a : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@require_flax
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None
@slow
def __lowercase ( self : str ):
'''simple docstring'''
_a : Tuple = FlaxBeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' )
_a : Optional[Any] = self.default_image_processor
_a : Optional[Any] = prepare_img()
_a : Union[str, Any] = image_processor(images=_SCREAMING_SNAKE_CASE ,return_tensors='np' ).pixel_values
# prepare bool_masked_pos
_a : Union[str, Any] = np.ones((1, 196) ,dtype=_SCREAMING_SNAKE_CASE )
# forward pass
_a : Any = model(pixel_values=_SCREAMING_SNAKE_CASE ,bool_masked_pos=_SCREAMING_SNAKE_CASE )
_a : Any = outputs.logits
# verify the logits
_a : List[str] = (1, 196, 8192)
self.assertEqual(logits.shape ,_SCREAMING_SNAKE_CASE )
_a : Union[str, Any] = np.array(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] )
self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] ,_SCREAMING_SNAKE_CASE ,atol=1E-2 ) )
@slow
def __lowercase ( self : Dict ):
'''simple docstring'''
_a : Optional[Any] = FlaxBeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' )
_a : Tuple = self.default_image_processor
_a : List[str] = prepare_img()
_a : Optional[Any] = image_processor(images=_SCREAMING_SNAKE_CASE ,return_tensors='np' )
# forward pass
_a : Any = model(**_SCREAMING_SNAKE_CASE )
_a : int = outputs.logits
# verify the logits
_a : Tuple = (1, 1000)
self.assertEqual(logits.shape ,_SCREAMING_SNAKE_CASE )
_a : List[Any] = np.array([-1.2385, -1.0987, -1.0108] )
self.assertTrue(np.allclose(logits[0, :3] ,_SCREAMING_SNAKE_CASE ,atol=1E-4 ) )
_a : Union[str, Any] = 281
self.assertEqual(logits.argmax(-1 ).item() ,_SCREAMING_SNAKE_CASE )
@slow
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : Any = FlaxBeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' )
_a : Optional[int] = self.default_image_processor
_a : int = prepare_img()
_a : Optional[int] = image_processor(images=_SCREAMING_SNAKE_CASE ,return_tensors='np' )
# forward pass
_a : Union[str, Any] = model(**_SCREAMING_SNAKE_CASE )
_a : str = outputs.logits
# verify the logits
_a : str = (1, 2_1841)
self.assertEqual(logits.shape ,_SCREAMING_SNAKE_CASE )
_a : int = np.array([1.6881, -0.2787, 0.5901] )
self.assertTrue(np.allclose(logits[0, :3] ,_SCREAMING_SNAKE_CASE ,atol=1E-4 ) )
_a : Dict = 2396
self.assertEqual(logits.argmax(-1 ).item() ,_SCREAMING_SNAKE_CASE )
| 354 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock_timeout(tmpdir):
    """simple docstring"""
    lock1 = FileLock(str(tmpdir / 'foo.lock'))
    lock2 = FileLock(str(tmpdir / 'foo.lock'))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout
def test_long_path(tmpdir):
    """simple docstring"""
    filename = 'a' * 1_000 + '.lock'
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith('.lock')
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
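# Both tests rely on the same contract: `acquire()` on an already-held lock
# blocks for at most `timeout` seconds (0 means fail immediately) and then
# raises `Timeout` instead of deadlocking.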
| 5 | 0 |
'''simple docstring'''
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def UpperCAmelCase_ (__a : List[Any] , __a : Union[str, Any] , __a : Dict ):
"""simple docstring"""
_a : Optional[int] = AutoConfig.from_pretrained(__a )
_a : Optional[Any] = FlaxAutoModelForSeqaSeqLM.from_config(config=__a )
_a : Tuple = checkpoints.load_tax_checkpoint(__a )
_a : Union[str, Any] = 'wi_0' in tax_model['target']['encoder']['layers_0']['mlp']
if config.model_type == "t5":
_a : Optional[Any] = 'SelfAttention'
if config.model_type == "longt5" and config.encoder_attention_type == "local":
_a : Union[str, Any] = 'LocalSelfAttention'
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_a : List[str] = 'TransientGlobalSelfAttention'
else:
raise ValueError(
            'Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5\'` with `encoder_attention_type`'
            ' attribute with a value from [\'local\', \'transient-global\'].' )
# Encoder
for layer_index in range(config.num_layers ):
_a : Dict = f"""layers_{str(__a )}"""
# Self-Attention
_a : Dict = tax_model['target']['encoder'][layer_name]['attention']['key']['kernel']
_a : Optional[Any] = tax_model['target']['encoder'][layer_name]['attention']['out']['kernel']
_a : Optional[Any] = tax_model['target']['encoder'][layer_name]['attention']['query']['kernel']
_a : Any = tax_model['target']['encoder'][layer_name]['attention']['value']['kernel']
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_a : Tuple = tax_model['target']['encoder'][layer_name]['attention']['T5LayerNorm_0']['scale']
# Layer Normalization
_a : List[str] = tax_model['target']['encoder'][layer_name]['pre_attention_layer_norm']['scale']
if split_mlp_wi:
_a : List[str] = tax_model['target']['encoder'][layer_name]['mlp']['wi_0']['kernel']
_a : Any = tax_model['target']['encoder'][layer_name]['mlp']['wi_1']['kernel']
else:
_a : List[Any] = tax_model['target']['encoder'][layer_name]['mlp']['wi']['kernel']
_a : Optional[int] = tax_model['target']['encoder'][layer_name]['mlp']['wo']['kernel']
# Layer Normalization
_a : Optional[Any] = tax_model['target']['encoder'][layer_name]['pre_mlp_layer_norm']['scale']
# Assigning
_a : Dict = flax_model.params['encoder']['block'][str(__a )]['layer']
_a : str = tax_attention_key
_a : str = tax_attention_out
_a : Any = tax_attention_query
_a : Tuple = tax_attention_value
_a : List[Any] = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_a : List[str] = tax_global_layer_norm
if split_mlp_wi:
_a : Optional[int] = tax_mlp_wi_a
_a : List[str] = tax_mlp_wi_a
else:
_a : str = tax_mlp_wi
_a : List[Any] = tax_mlp_wo
_a : Union[str, Any] = tax_mlp_layer_norm
_a : int = flax_model_encoder_layer_block
# Only for layer 0:
_a : Dict = tax_model['target']['encoder']['relpos_bias']['rel_embedding'].T
_a : Dict = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_a : Tuple = tax_model['target']['encoder']['side_relpos_bias']['rel_embedding'].T
_a : Optional[Any] = tax_encoder_global_rel_embedding
# Assigning
_a : Dict = tax_model['target']['encoder']['encoder_norm']['scale']
_a : str = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
_a : List[str] = f"""layers_{str(__a )}"""
# Self-Attention
_a : Optional[Any] = tax_model['target']['decoder'][layer_name]['self_attention']['key']['kernel']
_a : Tuple = tax_model['target']['decoder'][layer_name]['self_attention']['out']['kernel']
_a : Optional[Any] = tax_model['target']['decoder'][layer_name]['self_attention']['query']['kernel']
_a : Union[str, Any] = tax_model['target']['decoder'][layer_name]['self_attention']['value']['kernel']
# Layer Normalization
_a : str = tax_model['target']['decoder'][layer_name]['pre_self_attention_layer_norm'][
'scale'
]
# Encoder-Decoder-Attention
_a : Optional[int] = tax_model['target']['decoder'][layer_name]['encoder_decoder_attention']
_a : Any = tax_enc_dec_attention_module['key']['kernel']
_a : Union[str, Any] = tax_enc_dec_attention_module['out']['kernel']
_a : Union[str, Any] = tax_enc_dec_attention_module['query']['kernel']
_a : Tuple = tax_enc_dec_attention_module['value']['kernel']
# Layer Normalization
_a : List[Any] = tax_model['target']['decoder'][layer_name]['pre_cross_attention_layer_norm']['scale']
# MLP
if split_mlp_wi:
_a : Optional[Any] = tax_model['target']['decoder'][layer_name]['mlp']['wi_0']['kernel']
_a : Tuple = tax_model['target']['decoder'][layer_name]['mlp']['wi_1']['kernel']
else:
_a : int = tax_model['target']['decoder'][layer_name]['mlp']['wi']['kernel']
_a : Union[str, Any] = tax_model['target']['decoder'][layer_name]['mlp']['wo']['kernel']
# Layer Normalization
_a : Dict = tax_model['target']['decoder'][layer_name]['pre_mlp_layer_norm']['scale']
# Assigning
_a : List[Any] = flax_model.params['decoder']['block'][str(__a )]['layer']
_a : Dict = tax_attention_key
_a : List[Any] = tax_attention_out
_a : Optional[Any] = tax_attention_query
_a : Optional[int] = tax_attention_value
_a : Optional[int] = tax_pre_attention_layer_norm
_a : Tuple = tax_enc_dec_attention_key
_a : Optional[Any] = tax_enc_dec_attention_out
_a : Dict = tax_enc_dec_attention_query
_a : List[str] = tax_enc_dec_attention_value
_a : Optional[Any] = tax_cross_layer_norm
if split_mlp_wi:
_a : Tuple = tax_mlp_wi_a
_a : Optional[int] = tax_mlp_wi_a
else:
_a : Union[str, Any] = tax_mlp_wi
_a : List[str] = tax_mlp_wo
_a : Optional[Any] = txa_mlp_layer_norm
_a : Any = flax_model_decoder_layer_block
# Decoder Normalization
_a : Union[str, Any] = tax_model['target']['decoder']['decoder_norm']['scale']
_a : Any = txa_decoder_norm
# Only for layer 0:
_a : Optional[int] = tax_model['target']['decoder']['relpos_bias']['rel_embedding'].T
_a : Optional[Any] = tax_decoder_rel_embedding
# Token Embeddings
_a : str = tax_model['target']['token_embedder']['embedding']
_a : Optional[int] = txa_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
_a : Optional[int] = tax_model['target']['decoder']['logits_dense']['kernel']
flax_model.save_pretrained(__a )
print('T5X Model was sucessfully converted!' )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path the T5X checkpoint."""
)
parser.add_argument("""--config_name""", default=None, type=str, required=True, help="""Config name of LongT5/T5 model.""")
parser.add_argument(
"""--flax_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output FLAX model."""
)
__lowerCAmelCase = parser.parse_args()
convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
| 355 |
'''simple docstring'''
def solution(min_total: int = 10**12) -> int:
    """simple docstring"""
    prev_numerator = 1
    prev_denominator = 0
    numerator = 1
    denominator = 1
    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator
        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator
    return (denominator + 1) // 2
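# Sketch of the idea (Project Euler 100): numerator/denominator track successive
# solutions of the underlying Pell-like equation, i.e. successive arrangements
# where P(two blue discs) is exactly 1/2; the loop stops at the first one whose
# total exceeds `min_total`, and (denominator + 1) // 2 is its blue-disc count.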
if __name__ == "__main__":
print(f'''{solution() = }''')
| 5 | 0 |
'''simple docstring'''
def solution(limit: int = 1_000_000) -> int:
    """simple docstring"""
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:]))
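# Sieve-style Euler product: after processing every prime p <= limit, phi[n]
# holds n * prod_{p | n} (1 - 1/p). E.g. phi[12] = 12 * (1 - 1/2) * (1 - 1/3) = 4.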
if __name__ == "__main__":
print(f'''{solution() = }''')
| 356 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
__lowerCAmelCase = {
"""vocab_file""": {"""mobilebert-uncased""": """https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"""},
"""tokenizer_file""": {
"""mobilebert-uncased""": """https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"""
},
}
__lowerCAmelCase = {"""mobilebert-uncased""": 5_1_2}
__lowerCAmelCase = {}
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : Any = VOCAB_FILES_NAMES
__UpperCAmelCase : int = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Tuple = PRETRAINED_INIT_CONFIGURATION
__UpperCAmelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : Optional[Any] = MobileBertTokenizer
    def __init__( self : Dict ,vocab_file : Optional[str]=None ,tokenizer_file : Optional[str]=None ,do_lower_case : bool=True ,unk_token : str="[UNK]" ,sep_token : str="[SEP]" ,pad_token : str="[PAD]" ,cls_token : str="[CLS]" ,mask_token : str="[MASK]" ,tokenize_chinese_chars : bool=True ,strip_accents : Optional[bool]=None ,**kwargs ,):
        '''simple docstring'''
        super().__init__(
            vocab_file ,tokenizer_file=tokenizer_file ,do_lower_case=do_lower_case ,unk_token=unk_token ,sep_token=sep_token ,pad_token=pad_token ,cls_token=cls_token ,mask_token=mask_token ,tokenize_chinese_chars=tokenize_chinese_chars ,strip_accents=strip_accents ,**kwargs ,)
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase' ,do_lower_case ) != do_lower_case
            or normalizer_state.get('strip_accents' ,strip_accents ) != strip_accents
            or normalizer_state.get('handle_chinese_chars' ,tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            # Rebuild the backend normalizer so it matches the requested options.
            normalizer_class = getattr(normalizers ,normalizer_state.pop('type' ) )
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self : Tuple ,token_ids_0 : List[int] ,token_ids_1 : Optional[List[int]]=None ):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self : List[str] ,token_ids_0 : List[int] ,token_ids_1 : Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self : Union[str, Any] ,save_directory : str ,filename_prefix : Optional[str] = None ):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory ,name=filename_prefix )
        return tuple(files )
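# Usage sketch (hypothetical checkpoint id, taken from the vocab map above):
#   tok = UpperCAmelCase__.from_pretrained('mobilebert-uncased')
#   tok('hello world')['input_ids']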
| 5 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@property
    def dummy_uncond_unet( self : int ):
'''simple docstring'''
torch.manual_seed(0 )
_a : List[Any] = UNetaDModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=('DownBlock2D', 'AttnDownBlock2D') ,up_block_types=('AttnUpBlock2D', 'UpBlock2D') ,)
return model
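    # A deliberately tiny two-block UNet on 32x32 samples, so the smoke test
    # below runs in seconds while still exercising the attention blocks.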
    def test_inference( self : Optional[Any] ):
        '''simple docstring'''
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=unet ,scheduler=scheduler )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = pipe(num_inference_steps=2 ,generator=generator ,output_type='numpy' ).images
        generator = torch.manual_seed(0 )
        image_from_tuple = pipe(num_inference_steps=2 ,generator=generator ,output_type='numpy' ,return_dict=False )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
    def test_karras_ve_pipeline( self : Tuple ):
        '''simple docstring'''
        model_id = 'google/ncsnpp-celebahq-256'
        model = UNetaDModel.from_pretrained(model_id )
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=model ,scheduler=scheduler )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = pipe(num_inference_steps=20 ,generator=generator ,output_type='numpy' ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 357 |
'''simple docstring'''
def palindromic_string(input_string : str ):
    """simple docstring"""
    max_length = 0
    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ''
    output_string = ''
    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string ) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]
    # we will store the start and end of the previously furthest-ending
    # palindromic substring
    l, r = 0, 0
    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string ) )]
    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string ) ):
        k = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
        while (
            j - k >= 0
            and j + k < len(new_input_string )
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1
        length[j] = 2 * k - 1
        # does this palindrome end after the previously explored end (that is r)?
        # if yes, update r to the last index of this palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1
        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j
    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i
    return output_string
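# Overall complexity is O(n): the right boundary r only ever moves forward, so
# the inner while-loop does O(n) total work across all centers (Manacher).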
if __name__ == "__main__":
import doctest
doctest.testmod()
| 5 | 0 |
'''simple docstring'''
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
    # Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=7_5, num=7_5, endpoint=True, retstep=False)
    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 2_5, 5_0]
    abc2 = [2_5, 5_0, 7_5]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)
    # Compute the different operations using inbuilt functions.
    one = np.ones(7_5)
    zero = np.zeros((7_5,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
    # max-min composition
    # max-product composition
    # Plot each set A, set B and each operation result using plot() and subplot().
    from matplotlib import pyplot as plt
    plt.figure()
    plt.subplot(4, 3, 1)
    plt.plot(X, young)
    plt.title("""Young""")
    plt.grid(True)
    plt.subplot(4, 3, 2)
    plt.plot(X, middle_aged)
    plt.title("""Middle aged""")
    plt.grid(True)
    plt.subplot(4, 3, 3)
    plt.plot(X, union)
    plt.title("""union""")
    plt.grid(True)
    plt.subplot(4, 3, 4)
    plt.plot(X, intersection)
    plt.title("""intersection""")
    plt.grid(True)
    plt.subplot(4, 3, 5)
    plt.plot(X, complement_a)
    plt.title("""complement_a""")
    plt.grid(True)
    plt.subplot(4, 3, 6)
    plt.plot(X, difference)
    plt.title("""difference a/b""")
    plt.grid(True)
    plt.subplot(4, 3, 7)
    plt.plot(X, alg_sum)
    plt.title("""alg_sum""")
    plt.grid(True)
    plt.subplot(4, 3, 8)
    plt.plot(X, alg_product)
    plt.title("""alg_product""")
    plt.grid(True)
    plt.subplot(4, 3, 9)
    plt.plot(X, bdd_sum)
    plt.title("""bdd_sum""")
    plt.grid(True)
    plt.subplot(4, 3, 1_0)
    plt.plot(X, bdd_difference)
    plt.title("""bdd_difference""")
    plt.grid(True)
    plt.subplots_adjust(hspace=0.5)
    plt.show()
| 358 |
'''simple docstring'''
from functools import lru_cache
@lru_cache
def factorial(num : int ):
    """simple docstring"""
    if num < 0:
        raise ValueError('Number should not be negative.' )
    return 1 if num in (0, 1) else num * factorial(num - 1 )
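# e.g. factorial(5) == 120; @lru_cache memoizes the recursion across calls.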
if __name__ == "__main__":
import doctest
doctest.testmod()
| 5 | 0 |
'''simple docstring'''
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok : Any , src_examples : List[str] , tgt_examples : List[str] , max_tokens : int=1_0_2_4 ):
    """simple docstring"""
    finished_src, finished_tgt = [], []
    sorted_examples = list(zip(src_examples , tgt_examples ) )
    new_src, new_tgt = sorted_examples[0]
    def is_too_big(text : str ):
        return tok(text , return_tensors='pt' ).input_ids.shape[1] > max_tokens
    for src, tgt in tqdm(sorted_examples[1:] ):
        cand_src = new_src + ' ' + src
        cand_tgt = new_tgt + ' ' + tgt
        if is_too_big(cand_src ) or is_too_big(cand_tgt ): # cant fit, finalize example
            finished_src.append(new_src )
            finished_tgt.append(new_tgt )
            new_src, new_tgt = src, tgt
        else: # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt
    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src )
        finished_tgt.append(new_tgt )
    return finished_src, finished_tgt
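# Note: packing is greedy and order-preserving -- adjacent examples are merged
# until the tokenized length would exceed max_tokens, then a new pack starts.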
def pack_data_dir(tok : Any , data_dir : Path , max_tokens : int , save_path : Any ):
    """simple docstring"""
    save_path = Path(save_path )
    save_path.mkdir(exist_ok=True )
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"""{split}.source""", data_dir / f"""{split}.target"""
        src_docs = [x.rstrip() for x in Path(src_path ).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path ).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok , src_docs , tgt_docs , max_tokens )
        print(f"""packed {split} split from {len(src_docs )} examples -> {len(packed_src )}.""" )
        Path(save_path / f"""{split}.source""" ).open('w' ).write('\n'.join(packed_src ) )
        Path(save_path / f"""{split}.target""" ).open('w' ).write('\n'.join(packed_tgt ) )
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"""{split}.source""", data_dir / f"""{split}.target"""
        shutil.copyfile(src_path , save_path / f"""{split}.source""" )
        shutil.copyfile(tgt_path , save_path / f"""{split}.target""" )
def packer_cli():
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument('--tok_name' , type=str , help='like facebook/bart-large-cnn,t5-base, etc.' )
    parser.add_argument('--max_seq_len' , type=int , default=1_2_8 )
    parser.add_argument('--data_dir' , type=str )
    parser.add_argument('--save_path' , type=str )
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name )
    return pack_data_dir(tokenizer , Path(args.data_dir ) , args.max_seq_len , args.save_path )
if __name__ == "__main__":
packer_cli()
| 359 |
'''simple docstring'''
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None
log_levels = {
    """debug""": logging.DEBUG,
    """info""": logging.INFO,
    """warning""": logging.WARNING,
    """error""": logging.ERROR,
    """critical""": logging.CRITICAL,
}
_default_log_level = logging.WARNING
_tqdm_active = True
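# The helpers below lazily attach a single StreamHandler to the library root
# logger; _lock guards that one-time configuration across threads.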
def _get_default_logging_level():
    """simple docstring"""
    env_level_str = os.getenv('TRANSFORMERS_VERBOSITY' , None )
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"""Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, """
                f"""has to be one of: { ', '.join(log_levels.keys() ) }""" )
    return _default_log_level
def _get_library_name():
    """simple docstring"""
    return __name__.split('.' )[0]
def _get_library_root_logger():
    """simple docstring"""
    return logging.getLogger(_get_library_name() )
def _configure_library_root_logger():
    """simple docstring"""
    global _default_handler
    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler() # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush
        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler )
        library_root_logger.setLevel(_get_default_logging_level() )
        library_root_logger.propagate = False
def _reset_library_root_logger():
    """simple docstring"""
    global _default_handler
    with _lock:
        if not _default_handler:
            return
        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler )
        library_root_logger.setLevel(logging.NOTSET )
        _default_handler = None
def get_log_levels_dict():
    """simple docstring"""
    return log_levels
def get_logger(name : Optional[str] = None ):
    """simple docstring"""
    if name is None:
        name = _get_library_name()
    _configure_library_root_logger()
    return logging.getLogger(name )
def get_verbosity():
    """simple docstring"""
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()
def set_verbosity(verbosity : int ):
    """simple docstring"""
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity )
def set_verbosity_info():
    """simple docstring"""
    return set_verbosity(INFO )
def set_verbosity_warning():
    """simple docstring"""
    return set_verbosity(WARNING )
def set_verbosity_debug():
    """simple docstring"""
    return set_verbosity(DEBUG )
def set_verbosity_error():
    """simple docstring"""
    return set_verbosity(ERROR )
def disable_default_handler():
    """simple docstring"""
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler )
def enable_default_handler():
    """simple docstring"""
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler )
def add_handler(handler : logging.Handler ):
    """simple docstring"""
    _configure_library_root_logger()
    assert handler is not None
    _get_library_root_logger().addHandler(handler )
def remove_handler(handler : logging.Handler ):
    """simple docstring"""
    _configure_library_root_logger()
    assert handler is not None and handler in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler )
def disable_propagation():
    """simple docstring"""
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False
def enable_propagation():
    """simple docstring"""
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True
def enable_explicit_format():
    """simple docstring"""
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        formatter = logging.Formatter('[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s' )
        handler.setFormatter(formatter )
def reset_format():
    """simple docstring"""
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        handler.setFormatter(None )
def warning_advice(self : Union[str, Any] , *args : Union[str, Any] , **kwargs : Union[str, Any] ):
    """simple docstring"""
    no_advisory_warnings = os.getenv('TRANSFORMERS_NO_ADVISORY_WARNINGS' , False )
    if no_advisory_warnings:
        return
    self.warning(*args , **kwargs )
logging.Logger.warning_advice = warning_advice
@functools.lru_cache(None )
def warning_once(self : int , *args : Optional[Any] , **kwargs : Any ):
    """simple docstring"""
    self.warning(*args , **kwargs )
logging.Logger.warning_once = warning_once
class EmptyTqdm:
    """simple docstring"""
    def __init__( self : Any ,*args : Tuple ,**kwargs : int ): # pylint: disable=unused-argument
        '''simple docstring'''
        self._iterator = args[0] if args else None
    def __iter__( self : str ):
        '''simple docstring'''
        return iter(self._iterator )
    def __getattr__( self : List[Any] ,name : str ):
        '''simple docstring'''
        def empty_fn(*args : Optional[Any] ,**kwargs : Any ): # pylint: disable=unused-argument
            return
        return empty_fn
    def __enter__( self : List[str] ):
        '''simple docstring'''
        return self
    def __exit__( self : List[str] ,type_ : str ,value : List[Any] ,traceback : str ):
        '''simple docstring'''
        return
class _tqdm_cls:
    """simple docstring"""
    def __call__( self : Union[str, Any] ,*args : Tuple ,**kwargs : Tuple ):
        '''simple docstring'''
        if _tqdm_active:
            return tqdm_lib.tqdm(*args ,**kwargs )
        else:
            return EmptyTqdm(*args ,**kwargs )
    def set_lock( self : str ,*args : List[Any] ,**kwargs : Any ):
        '''simple docstring'''
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args ,**kwargs )
    def get_lock( self : List[str] ):
        '''simple docstring'''
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
tqdm = _tqdm_cls()
def is_progress_bar_enabled():
    """simple docstring"""
    global _tqdm_active
    return bool(_tqdm_active )
def enable_progress_bars():
    """simple docstring"""
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()
def disable_progress_bars():
    """simple docstring"""
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
| 5 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
__lowerCAmelCase = {
"""vocab_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""",
    },
    """tokenizer_file""": {
        """google/realm-cc-news-pretrained-embedder""": (
            """https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"""
        ),
        """google/realm-cc-news-pretrained-openqa""": (
            """https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-openqa""": (
"""https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-reader""": (
"""https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-openqa""": (
"""https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-reader""": (
"""https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"""
),
},
}
__lowerCAmelCase = {
"""google/realm-cc-news-pretrained-embedder""": 5_1_2,
"""google/realm-cc-news-pretrained-encoder""": 5_1_2,
"""google/realm-cc-news-pretrained-scorer""": 5_1_2,
"""google/realm-cc-news-pretrained-openqa""": 5_1_2,
"""google/realm-orqa-nq-openqa""": 5_1_2,
"""google/realm-orqa-nq-reader""": 5_1_2,
"""google/realm-orqa-wq-openqa""": 5_1_2,
"""google/realm-orqa-wq-reader""": 5_1_2,
}
__lowerCAmelCase = {
"""google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-reader""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-reader""": {"""do_lower_case""": True},
}
class UpperCAmelCase__ ( __lowercase ):
"""simple docstring"""
__UpperCAmelCase : List[Any] = VOCAB_FILES_NAMES
__UpperCAmelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Union[str, Any] = PRETRAINED_INIT_CONFIGURATION
__UpperCAmelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : List[str] = RealmTokenizer
def __init__( self : str ,_a : str=None ,_a : List[str]=None ,_a : Union[str, Any]=True ,_a : List[Any]="[UNK]" ,_a : str="[SEP]" ,_a : Optional[int]="[PAD]" ,_a : List[str]="[CLS]" ,_a : Any="[MASK]" ,_a : List[str]=True ,_a : Union[str, Any]=None ,**_a : Tuple ,):
'''simple docstring'''
super().__init__(
UpperCAmelCase__ ,tokenizer_file=UpperCAmelCase__ ,do_lower_case=UpperCAmelCase__ ,unk_token=UpperCAmelCase__ ,sep_token=UpperCAmelCase__ ,pad_token=UpperCAmelCase__ ,cls_token=UpperCAmelCase__ ,mask_token=UpperCAmelCase__ ,tokenize_chinese_chars=UpperCAmelCase__ ,strip_accents=UpperCAmelCase__ ,**UpperCAmelCase__ ,)
_a : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' ,UpperCAmelCase__ ) != do_lower_case
or normalizer_state.get('strip_accents' ,UpperCAmelCase__ ) != strip_accents
or normalizer_state.get('handle_chinese_chars' ,UpperCAmelCase__ ) != tokenize_chinese_chars
):
_a : Optional[Any] = getattr(UpperCAmelCase__ ,normalizer_state.pop('type' ) )
_a : Dict = do_lower_case
_a : int = strip_accents
_a : List[str] = tokenize_chinese_chars
_a : Dict = normalizer_class(**UpperCAmelCase__ )
_a : Optional[int] = do_lower_case
def __lowercase ( self : Any ,_a : Dict ,**_a : Dict ):
'''simple docstring'''
_a : str = PaddingStrategy.MAX_LENGTH
_a : Optional[Any] = text
_a : Tuple = kwargs.pop('text_pair' ,UpperCAmelCase__ )
_a : List[Any] = kwargs.pop('return_tensors' ,UpperCAmelCase__ )
_a : List[Any] = {
'input_ids': [],
'attention_mask': [],
'token_type_ids': [],
}
for idx, candidate_text in enumerate(UpperCAmelCase__ ):
if batch_text_pair is not None:
_a : Any = batch_text_pair[idx]
else:
_a : Any = None
_a : Optional[int] = super().__call__(UpperCAmelCase__ ,UpperCAmelCase__ ,return_tensors=UpperCAmelCase__ ,**UpperCAmelCase__ )
_a : int = encoded_candidates.get('input_ids' )
_a : Optional[int] = encoded_candidates.get('attention_mask' )
_a : int = encoded_candidates.get('token_type_ids' )
if encoded_input_ids is not None:
output_data["input_ids"].append(UpperCAmelCase__ )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(UpperCAmelCase__ )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(UpperCAmelCase__ )
_a : str = {key: item for key, item in output_data.items() if len(UpperCAmelCase__ ) != 0}
return BatchEncoding(UpperCAmelCase__ ,tensor_type=UpperCAmelCase__ )
def __lowercase ( self : str ,_a : Any ,_a : Tuple=None ):
'''simple docstring'''
_a : Union[str, Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __lowercase ( self : Dict ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
_a : Tuple = [self.sep_token_id]
_a : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowercase ( self : Union[str, Any] ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
_a : Union[str, Any] = self._tokenizer.model.save(UpperCAmelCase__ ,name=UpperCAmelCase__ )
return tuple(UpperCAmelCase__ )
| 360 |
'''simple docstring'''
def UpperCAmelCase_ (equation1 : list[int] , equation2 : list[int] ):
    """simple docstring"""
    if not len(equation1 ) == len(equation2 ) == 3:
        raise ValueError('Please enter a valid equation.' )
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError('Both a & b of two equations can\'t be zero.' )
    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2
    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1
    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError('Infinite solutions. (Consistent system)' )
        else:
            raise ValueError('No solution. (Inconsistent system)' )
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
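# Worked example: for x + 2y = 3 and 2x + y = 3 (coefficients [1, 2, 3] and
# [2, 1, 3]) all three determinants equal -3, giving the solution (1.0, 1.0).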
| 5 | 0 |
'''simple docstring'''
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class UpperCAmelCase__ ( TensorFormatter[Mapping, '''torch.Tensor''', Mapping] ):
"""simple docstring"""
    def __init__( self : Union[str, Any] ,features : List[Any]=None ,**torch_tensor_kwargs ):
        '''simple docstring'''
        super().__init__(features=features )
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch # noqa import torch at initialization
    def _consolidate( self : List[str] ,column : list ):
        '''simple docstring'''
        import torch
        if isinstance(column ,list ) and column:
            if all(
                isinstance(x ,torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column ):
                return torch.stack(column )
        return column
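    # _consolidate stacks a list of same-shape, same-dtype tensors into one
    # batched tensor; ragged columns fall through and stay a Python list.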
    def _tensorize( self : Tuple ,value : Optional[Any] ):
        '''simple docstring'''
        import torch
        if isinstance(value ,(str, bytes, type(None )) ):
            return value
        elif isinstance(value ,(np.character, np.ndarray) ) and np.issubdtype(value.dtype ,np.character ):
            return value.tolist()
        default_dtype = {}
        if isinstance(value ,(np.number, np.ndarray) ) and np.issubdtype(value.dtype ,np.integer ):
            default_dtype = {'dtype': torch.int64}
        elif isinstance(value ,(np.number, np.ndarray) ) and np.issubdtype(value.dtype ,np.floating ):
            default_dtype = {'dtype': torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image
            if isinstance(value ,PIL.Image.Image ):
                value = np.asarray(value )
        return torch.tensor(value ,**{**default_dtype, **self.torch_tensor_kwargs} )
    def _recursive_tensorize( self : Tuple ,data_struct : Tuple ):
        '''simple docstring'''
        import torch
        # support for torch, tf, jax etc.
        if hasattr(data_struct ,'__array__' ) and not isinstance(data_struct ,torch.Tensor ):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct ,np.ndarray ):
            if data_struct.dtype == object: # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        elif isinstance(data_struct ,(list, tuple) ):
            return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        return self._tensorize(data_struct )
    def recursive_tensorize( self : Tuple ,data_struct : Tuple ):
        '''simple docstring'''
        return map_nested(self._recursive_tensorize ,data_struct ,map_list=False )
    def format_row( self : Tuple ,pa_table : List[str] ):
        '''simple docstring'''
        row = self.numpy_arrow_extractor().extract_row(pa_table )
        row = self.python_features_decoder.decode_row(row )
        return self.recursive_tensorize(row )
    def format_column( self : Union[str, Any] ,pa_table : Union[str, Any] ):
        '''simple docstring'''
        column = self.numpy_arrow_extractor().extract_column(pa_table )
        column = self.python_features_decoder.decode_column(column ,pa_table.column_names[0] )
        column = self.recursive_tensorize(column )
        column = self._consolidate(column )
        return column
    def format_batch( self : str ,pa_table : Union[str, Any] ):
        '''simple docstring'''
        batch = self.numpy_arrow_extractor().extract_batch(pa_table )
        batch = self.python_features_decoder.decode_batch(batch )
        batch = self.recursive_tensorize(batch )
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name] )
        return batch
| 361 |
'''simple docstring'''
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMSNModelTester :
"""simple docstring"""
    def __init__( self : int ,parent : List[str] ,batch_size : Optional[Any]=13 ,image_size : str=30 ,patch_size : str=2 ,num_channels : Union[str, Any]=3 ,is_training : Optional[Any]=True ,use_labels : int=True ,hidden_size : Union[str, Any]=32 ,num_hidden_layers : List[Any]=5 ,num_attention_heads : Union[str, Any]=4 ,intermediate_size : int=37 ,hidden_act : Any="gelu" ,hidden_dropout_prob : Union[str, Any]=0.1 ,attention_probs_dropout_prob : str=0.1 ,type_sequence_label_size : List[str]=10 ,initializer_range : Dict=0.02 ,scope : Tuple=None ,):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs( self : Any ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self : Optional[int] ):
'''simple docstring'''
return ViTMSNConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,initializer_range=self.initializer_range ,)
    def create_and_check_model( self : Tuple ,config : Any ,pixel_values : List[Any] ,labels : int ):
        '''simple docstring'''
        model = ViTMSNModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_image_classification( self : List[Any] ,config : str ,pixel_values : Tuple ,labels : Dict ):
        '''simple docstring'''
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values ,labels=labels )
        print(f'Pixel and labels shape: {pixel_values.shape}, {labels.shape}' )
        print(f'Labels: {labels}' )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common( self : Any ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Tuple = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
__UpperCAmelCase : List[Any] = (
{'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification}
if is_torch_available()
else {}
)
__UpperCAmelCase : str = False
__UpperCAmelCase : Optional[Any] = False
__UpperCAmelCase : List[str] = False
__UpperCAmelCase : int = False
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : List[str] = ViTMSNModelTester(self )
_a : Optional[int] = ConfigTester(self ,config_class=_a ,has_text_modality=_a ,hidden_size=37 )
def __lowercase ( self : str ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMSN does not use inputs_embeds' )
def __lowercase ( self : List[str] ):
'''simple docstring'''
pass
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a, _a : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : List[Any] = model_class(_a )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
_a : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a ,nn.Linear ) )
def __lowercase ( self : Any ):
'''simple docstring'''
_a, _a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : List[str] = model_class(_a )
_a : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : List[Any] = [*signature.parameters.keys()]
_a : int = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_a )
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def __lowercase ( self : int ):
'''simple docstring'''
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : Dict = ViTMSNModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def UpperCAmelCase_ ():
"""simple docstring"""
_a : List[str] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor( self : Union[str, Any] ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained('facebook/vit-msn-small' ) if is_vision_available() else None
@slow
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(2 )
_a : List[str] = ViTMSNForImageClassification.from_pretrained('facebook/vit-msn-small' ).to(_a )
_a : List[str] = self.default_image_processor
_a : int = prepare_img()
_a : Tuple = image_processor(images=_a ,return_tensors='pt' ).to(_a )
# forward pass
with torch.no_grad():
_a : Optional[int] = model(**_a )
# verify the logits
_a : Union[str, Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,_a )
_a : List[Any] = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_a ,atol=1E-4 ) )
| 5 | 0 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def stock_price(symbol : str = "AAPL" ):
    """simple docstring"""
    url = f"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
    soup = BeautifulSoup(requests.get(url ).text , 'html.parser' )
    class_ = 'My(6px) Pos(r) smartphone_Mt(6px)'
    return soup.find('div' , class_=class_ ).find('span' ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
| 362 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def world_covidaa_stats(url : str = "https://www.worldometers.info/coronavirus" ):
    """simple docstring"""
    soup = BeautifulSoup(requests.get(url ).text , 'html.parser' )
    keys = soup.findAll('h1' )
    values = soup.findAll('div' , {'class': 'maincounter-number'} )
    keys += soup.findAll('span' , {'class': 'panel-title'} )
    values += soup.findAll('div' , {'class': 'number-table-main'} )
    return {key.text.strip(): value.text.strip() for key, value in zip(keys , values )}
if __name__ == "__main__":
print("""\033[1m""" + """COVID-19 Status of the World""" + """\033[0m\n""")
for key, value in world_covidaa_stats().items():
print(f'''{key}\n{value}\n''')
| 5 | 0 |
'''simple docstring'''
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(search_prob , find_max : bool = True , max_x : float = math.inf , min_x : float = -math.inf , max_y : float = math.inf , min_y : float = -math.inf , visualization : bool = False , start_temperate : float = 1_0_0 , rate_of_decrease : float = 0.01 , threshold_temp : float = 1 , ):
    """simple docstring"""
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score )
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ): # till we do not find a neighbor that we can move to
            index = random.randint(0 , len(neighbors ) - 1 ) # picking a random neighbor
            picked_neighbor = neighbors.pop(index )
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue # neighbor outside our bounds
            if not find_max:
                change = change * -1 # in case we are finding minimum
            if change > 0: # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                ) # probability generation function
                if random.random() < probability: # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state
    if visualization:
        from matplotlib import pyplot as plt
        plt.plot(range(iterations ) , scores )
        plt.xlabel('Iterations' )
        plt.ylabel('Function values' )
        plt.show()
    return best_state
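# Worse moves are accepted with probability exp(change / current_temp), so the
# search explores freely at high temperature and gradually degrades into plain
# hill climbing as the temperature decays toward threshold_temp.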
if __name__ == "__main__":
    def test_fa(x : int , y : int ):
        """simple docstring"""
        return (x**2) + (y**2)
    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=1_2, y=4_7, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=1_0_0, min_x=5, max_y=5_0, min_y=-5, visualization=True
    )
    print(
        """The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
        f'''and 50 > y > - 5 found via simulated annealing: {local_min.score()}'''
    )
    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=1_2, y=4_7, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(
        prob, find_max=True, max_x=1_0_0, min_x=5, max_y=5_0, min_y=-5, visualization=True
    )
    print(
        """The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
        f'''and 50 > y > - 5 found via simulated annealing: {local_min.score()}'''
    )
    def test_fa(x : int , y : int ):
        """simple docstring"""
        return (3 * x**2) - (6 * y)
    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        """The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: """
        f'''{local_min.score()}'''
    )
    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        """The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: """
        f'''{local_min.score()}'''
    )
| 363 |
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
__lowerCAmelCase = """docs/source/en/_toctree.yml"""
def clean_model_doc_toc(model_doc : list ):
    """simple docstring"""
    counts = defaultdict(int )
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]
    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc['title'] for doc in model_doc if doc['local'] == duplicate_key} )
        if len(titles ) > 1:
            raise ValueError(
                f"""{duplicate_key} is present several times in the documentation table of content at """
                '`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
                'others.' )
        # Only add this once
        new_doc.append({'local': duplicate_key, 'title': titles[0]} )
    # Add non-duplicate keys
    new_doc.extend([doc for doc in model_doc if counts[doc['local']] == 1] )
    # Sort
    return sorted(new_doc , key=lambda s: s["title"].lower() )
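# e.g. two entries sharing {"local": "bert"} collapse into a single entry, and
# the cleaned list comes back sorted case-insensitively by title.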
def check_model_doc(overwrite : bool=False ):
    """simple docstring"""
    with open(PATH_TO_TOC , encoding='utf-8' ) as f:
        content = yaml.safe_load(f.read() )
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['sections']
    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]['sections']
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc ) if 'sections' in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc['sections']
        new_modality_doc = clean_model_doc_toc(old_modality_doc )
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                modality_doc['sections'] = new_modality_doc
    if diff:
        if overwrite:
            api_doc[model_idx]['sections'] = model_doc
            content[api_idx]['sections'] = api_doc
            with open(PATH_TO_TOC , 'w' , encoding='utf-8' ) as f:
                f.write(yaml.dump(content , allow_unicode=True ) )
        else:
            raise ValueError(
                'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
    args = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 5 | 0 |
'''simple docstring'''
def naive_cut_rod_recursive(n : int , prices : list ):
    """simple docstring"""
    _enforce_args(n , prices )
    if n == 0:
        return 0
    max_revenue = float('-inf' )
    for i in range(1 , n + 1 ):
        max_revenue = max(
            max_revenue , prices[i - 1] + naive_cut_rod_recursive(n - i , prices ) )
    return max_revenue
def top_down_cut_rod(n : int , prices : list ):
    """simple docstring"""
    _enforce_args(n , prices )
    max_rev = [float('-inf' ) for _ in range(n + 1 )]
    return _top_down_cut_rod_recursive(n , prices , max_rev )
def _top_down_cut_rod_recursive(n : int , prices : list , max_rev : list ):
    """simple docstring"""
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float('-inf' )
        for i in range(1 , n + 1 ):
            max_revenue = max(
                max_revenue , prices[i - 1] + _top_down_cut_rod_recursive(n - i , prices , max_rev ) , )
        max_rev[n] = max_revenue
        return max_rev[n]
def bottom_up_cut_rod(n : int , prices : list ):
    """simple docstring"""
    _enforce_args(n , prices )
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float('-inf' ) for _ in range(n + 1 )]
    max_rev[0] = 0
    for i in range(1 , n + 1 ):
        max_revenue_i = max_rev[i]
        for j in range(1 , i + 1 ):
            max_revenue_i = max(max_revenue_i , prices[j - 1] + max_rev[i - j] )
        max_rev[i] = max_revenue_i
    return max_rev[n]
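# Bottom-up DP: max_rev[i] holds the best revenue for a rod of length i, built
# by trying every first-cut length j, for O(n^2) time and O(n) extra space.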
def _enforce_args(n : int , prices : list ):
    """simple docstring"""
    if n < 0:
        msg = f"""n must be greater than or equal to 0. Got n = {n}"""
        raise ValueError(msg )
    if n > len(prices ):
        msg = (
            """Each integral piece of rod must have a corresponding price. """
            f"""Got n = {n} but length of prices = {len(prices )}"""
        )
        raise ValueError(msg )
def main():
    """simple docstring"""
    prices = [6, 1_0, 1_2, 1_5, 2_0, 2_3]
    n = len(prices )
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 3_6
    max_rev_top_down = top_down_cut_rod(n , prices )
    max_rev_bottom_up = bottom_up_cut_rod(n , prices )
    max_rev_naive = naive_cut_rod_recursive(n , prices )
    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
| 364 |
'''simple docstring'''
from collections.abc import Generator
from math import sin
def UpperCAmelCase_ (__a : bytes ):
"""simple docstring"""
if len(__a ) != 3_2:
raise ValueError('Input must be of length 32' )
_a : Any = b''
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def UpperCAmelCase_ (__a : int ):
"""simple docstring"""
if i < 0:
raise ValueError('Input must be non-negative' )
_a : List[str] = format(__a , '08x' )[-8:]
_a : str = b''
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('utf-8' )
return little_endian_hex
def UpperCAmelCase_ (__a : bytes ):
"""simple docstring"""
_a : List[Any] = b''
for char in message:
bit_string += format(__a , '08b' ).encode('utf-8' )
_a : int = format(len(__a ) , '064b' ).encode('utf-8' )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(__a ) % 5_1_2 != 4_4_8:
bit_string += b"0"
bit_string += to_little_endian(start_len[3_2:] ) + to_little_endian(start_len[:3_2] )
return bit_string
def UpperCAmelCase_ (__a : bytes ):
"""simple docstring"""
if len(__a ) % 5_1_2 != 0:
raise ValueError('Input must have length that\'s a multiple of 512' )
for pos in range(0 , len(__a ) , 5_1_2 ):
_a : List[Any] = bit_string[pos : pos + 5_1_2]
_a : str = []
for i in range(0 , 5_1_2 , 3_2 ):
block_words.append(int(to_little_endian(block[i : i + 3_2] ) , 2 ) )
yield block_words
def UpperCAmelCase_ (__a : int ):
"""simple docstring"""
if i < 0:
raise ValueError('Input must be non-negative' )
_a : List[str] = format(__a , '032b' )
_a : int = ''
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(__a , 2 )
def UpperCAmelCase_ (__a : int , __a : int ):
"""simple docstring"""
return (a + b) % 2**3_2
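# Addition modulo 2**32: MD5 state words are 32-bit, so every sum wraps around.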
def UpperCAmelCase_ (__a : int , __a : int ):
"""simple docstring"""
if i < 0:
raise ValueError('Input must be non-negative' )
if shift < 0:
raise ValueError('Shift must be non-negative' )
return ((i << shift) ^ (i >> (3_2 - shift))) % 2**3_2
def UpperCAmelCase_ (__a : bytes ):
"""simple docstring"""
_a : str = preprocess(__a )
_a : Optional[int] = [int(2**3_2 * abs(sin(i + 1 ) ) ) for i in range(6_4 )]
# Starting states
_a : int = 0x67_45_23_01
_a : Union[str, Any] = 0xEF_CD_AB_89
_a : str = 0x98_BA_DC_FE
_a : List[Any] = 0x10_32_54_76
_a : Optional[int] = [
7,
1_2,
1_7,
2_2,
7,
1_2,
1_7,
2_2,
7,
1_2,
1_7,
2_2,
7,
1_2,
1_7,
2_2,
5,
9,
1_4,
2_0,
5,
9,
1_4,
2_0,
5,
9,
1_4,
2_0,
5,
9,
1_4,
2_0,
4,
1_1,
1_6,
2_3,
4,
1_1,
1_6,
2_3,
4,
1_1,
1_6,
2_3,
4,
1_1,
1_6,
2_3,
6,
1_0,
1_5,
2_1,
6,
1_0,
1_5,
2_1,
6,
1_0,
1_5,
2_1,
6,
1_0,
1_5,
2_1,
]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(__a ):
_a : Union[str, Any] = aa
_a : List[Any] = ba
_a : List[Any] = ca
_a : Dict = da
# Hash current chunk
for i in range(6_4 ):
if i <= 1_5:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
_a : Optional[int] = d ^ (b & (c ^ d))
_a : Optional[Any] = i
elif i <= 3_1:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
_a : Optional[Any] = c ^ (d & (b ^ c))
_a : Dict = (5 * i + 1) % 1_6
elif i <= 4_7:
_a : Optional[Any] = b ^ c ^ d
_a : Dict = (3 * i + 5) % 1_6
else:
_a : int = c ^ (b | not_aa(__a ))
_a : List[str] = (7 * i) % 1_6
_a : Optional[int] = (f + a + added_consts[i] + block_words[g]) % 2**3_2
_a : Union[str, Any] = d
_a : Tuple = c
_a : Optional[int] = b
_a : Union[str, Any] = sum_aa(__a , left_rotate_aa(__a , shift_amounts[i] ) )
# Add hashed chunk to running total
_a : Any = sum_aa(__a , __a )
_a : Dict = sum_aa(__a , __a )
_a : Union[str, Any] = sum_aa(__a , __a )
_a : str = sum_aa(__a , __a )
_a : Optional[Any] = reformat_hex(__a ) + reformat_hex(__a ) + reformat_hex(__a ) + reformat_hex(__a )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 5 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase__ ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Tuple = GPTSanJapaneseTokenizer
__UpperCAmelCase : List[Any] = False
__UpperCAmelCase : List[Any] = {"""do_clean_text""": False, """add_prefix_space""": False}
def __lowercase ( self : List[str] ):
'''simple docstring'''
super().setUp()
# fmt: off
_a : List[Any] = ['こん', 'こんに', 'にちは', 'ばんは', '世界,㔺界', '、', '。', '<BR>', '<SP>', '<TAB>', '<URL>', '<EMAIL>', '<TEL>', '<DATE>', '<PRICE>', '<BLOCK>', '<KIGOU>', '<U2000U2BFF>', '<|emoji1|>', '<unk>', '<|bagoftoken|>', '<|endoftext|>']
# fmt: on
_a : Dict = {'emoji': {'\ud83d\ude00': '<|emoji1|>'}, 'emoji_inv': {'<|emoji1|>': '\ud83d\ude00'}} # 😀
_a : Any = {'unk_token': '<unk>'}
_a : Any = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
_a : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['emoji_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
with open(self.emoji_file ,'w' ) as emoji_writer:
emoji_writer.write(json.dumps(_lowercase ) )
def __lowercase ( self : Optional[int] ,**_a : List[Any] ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname ,**_lowercase )
def __lowercase ( self : str ,_a : str ):
'''simple docstring'''
_a : int = 'こんにちは、世界。 \nこんばんは、㔺界。😀'
_a : Union[str, Any] = 'こんにちは、世界。 \nこんばんは、世界。😀'
return input_text, output_text
def __lowercase ( self : Any ,_a : Any ):
'''simple docstring'''
_a, _a : Tuple = self.get_input_output_texts(_lowercase )
_a : Union[str, Any] = tokenizer.encode(_lowercase ,add_special_tokens=_lowercase )
_a : Tuple = tokenizer.decode(_lowercase ,clean_up_tokenization_spaces=_lowercase )
return text, ids
def __lowercase ( self : List[str] ):
'''simple docstring'''
pass # TODO add if relevant
def __lowercase ( self : List[str] ):
'''simple docstring'''
pass # TODO add if relevant
def __lowercase ( self : Dict ):
'''simple docstring'''
pass # TODO add if relevant
def __lowercase ( self : Dict ):
'''simple docstring'''
_a : Optional[int] = self.get_tokenizer()
# Testing tokenization
_a : Tuple = 'こんにちは、世界。 こんばんは、㔺界。'
_a : List[Any] = ['こん', 'にちは', '、', '世界', '。', '<SP>', 'こん', 'ばんは', '、', '㔺界', '。']
_a : Optional[Any] = tokenizer.tokenize(_lowercase )
self.assertListEqual(_lowercase ,_lowercase )
# Testing conversion to ids without special tokens
_a : Union[str, Any] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
_a : Dict = tokenizer.convert_tokens_to_ids(_lowercase )
self.assertListEqual(_lowercase ,_lowercase )
# Testing conversion to ids with special tokens
_a : str = tokens + [tokenizer.unk_token]
_a : str = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
_a : Optional[Any] = tokenizer.convert_tokens_to_ids(_lowercase )
self.assertListEqual(_lowercase ,_lowercase )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
_a : Optional[Any] = self.get_tokenizer()
# Testing tokenization
_a : Optional[Any] = 'こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'
_a : int = 'こんにちは、、、、世界。こんばんは、、、、世界。'
_a : Tuple = tokenizer.encode(_lowercase )
_a : Union[str, Any] = tokenizer.decode(_lowercase )
self.assertEqual(_lowercase ,_lowercase )
@slow
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : Optional[Any] = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
# Testing tokenization
_a : Any = 'こんにちは、世界。'
_a : Any = 'こんばんは、㔺界。😀'
_a : List[str] = 'こんにちは、世界。こんばんは、世界。😀'
_a : List[str] = tokenizer.encode(prefix_text + input_text )
_a : int = tokenizer.encode('' ,prefix_text=prefix_text + input_text )
_a : List[str] = tokenizer.encode(_lowercase ,prefix_text=_lowercase )
_a : Union[str, Any] = tokenizer.decode(_lowercase )
_a : Optional[int] = tokenizer.decode(_lowercase )
_a : Tuple = tokenizer.decode(_lowercase )
self.assertEqual(_lowercase ,_lowercase )
self.assertEqual(_lowercase ,_lowercase )
self.assertEqual(_lowercase ,_lowercase )
@slow
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : Dict = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
# Testing tokenization
_a : Tuple = 'こんにちは、世界。'
_a : List[str] = 'こんばんは、㔺界。😀'
_a : Dict = len(tokenizer.encode(_lowercase ) ) - 2
_a : Tuple = len(tokenizer.encode(_lowercase ) ) - 2
_a : Optional[int] = [1] + [0] * (len_prefix + len_text + 1)
_a : List[str] = [1] * (len_prefix + len_text + 1) + [0]
_a : int = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
_a : List[Any] = tokenizer(prefix_text + input_text ).token_type_ids
_a : Optional[Any] = tokenizer('' ,prefix_text=prefix_text + input_text ).token_type_ids
_a : str = tokenizer(_lowercase ,prefix_text=_lowercase ).token_type_ids
self.assertListEqual(_lowercase ,_lowercase )
self.assertListEqual(_lowercase ,_lowercase )
self.assertListEqual(_lowercase ,_lowercase )
@slow
def __lowercase ( self : Dict ):
'''simple docstring'''
_a : Union[str, Any] = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
_a : List[str] = tokenizer.encode('あンいワ' )
_a : str = tokenizer.encode('' ,prefix_text='あンいワ' )
_a : Optional[Any] = tokenizer.encode('いワ' ,prefix_text='あン' )
self.assertEqual(tokenizer.decode(_lowercase ) ,tokenizer.decode(_lowercase ) )
self.assertEqual(tokenizer.decode(_lowercase ) ,tokenizer.decode(_lowercase ) )
self.assertNotEqual(_lowercase ,_lowercase )
self.assertNotEqual(_lowercase ,_lowercase )
self.assertEqual(x_token_a[1] ,x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] ,x_token_a[3] ) # SEG token
@slow
def __lowercase ( self : int ):
'''simple docstring'''
_a : List[Any] = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
_a : Any = [['武田信玄', 'は、'], ['織田信長', 'の配下の、']]
_a : Optional[int] = tokenizer(_lowercase ,padding=_lowercase )
_a : Optional[int] = tokenizer.batch_encode_plus(_lowercase ,padding=_lowercase )
# fmt: off
_a : str = [[3_5993, 8640, 2_5948, 3_5998, 3_0647, 3_5675, 3_5999, 3_5999], [3_5993, 1_0382, 9868, 3_5998, 3_0646, 9459, 3_0646, 3_5675]]
_a : Union[str, Any] = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
_a : Optional[int] = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids ,_lowercase )
self.assertListEqual(x_token.token_type_ids ,_lowercase )
self.assertListEqual(x_token.attention_mask ,_lowercase )
self.assertListEqual(x_token_a.input_ids ,_lowercase )
self.assertListEqual(x_token_a.token_type_ids ,_lowercase )
self.assertListEqual(x_token_a.attention_mask ,_lowercase )
def __lowercase ( self : List[str] ):
'''simple docstring'''
pass
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
pass
| 365 |
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps : int , max_beta : float=0.999 , alpha_transform_type : str="cosine" , ):
    """simple docstring"""
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t : Union[str, Any] ):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t : int ):
            return math.exp(t * -12.0 )
    else:
        raise ValueError(f"""Unsupported alpha_transform_type: {alpha_transform_type}""" )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) , max_beta ) )
    return torch.tensor(betas , dtype=torch.float32 )
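# beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), capped at max_beta so every
# step keeps a strictly positive alpha; the cosine branch is the
# squaredcos_cap_v2 schedule referenced by the scheduler class below.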
class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
    """simple docstring"""

    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        use_karras_sigmas: Optional[bool] = False,
        clip_sample: Optional[bool] = False,
        clip_sample_range: float = 1.0,
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="cosine")
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="exp")
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
        self.use_karras_sigmas = use_karras_sigmas
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        '''simple docstring'''
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps
        indices = (schedule_timesteps == timestep).nonzero()
        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]
        return indices[pos].item()
    @property
    def init_noise_sigma(self):
        '''simple docstring'''
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()
        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Union[float, torch.FloatTensor]):
        '''simple docstring'''
        step_index = self.index_for_timestep(timestep)
        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, torch.device] = None,
        num_train_timesteps: Optional[int] = None,
    ):
        '''simple docstring'''
        self.num_inference_steps = num_inference_steps
        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps
        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )
        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        log_sigmas = np.log(sigmas)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
            timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])
        timesteps = torch.from_numpy(timesteps)
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])
        if str(device).startswith('mps'):
            # mps does not support float64
            self.timesteps = timesteps.to(device, dtype=torch.float32)
        else:
            self.timesteps = timesteps.to(device=device)
        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None
        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def _sigma_to_t(self, sigma, log_sigmas):
        '''simple docstring'''
        log_sigma = np.log(sigma)
        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]
        # get sigmas range
        low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1
        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]
        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w, 0, 1)
        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape)
        return t
    def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps):
        '''simple docstring'''
        sigma_min: float = in_sigmas[-1].item()
        sigma_max: float = in_sigmas[0].item()
        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0, 1, num_inference_steps)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas

    @property
    def state_in_first_order(self):
        '''simple docstring'''
        return self.dt is None
    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ):
        '''simple docstring'''
        step_index = self.index_for_timestep(timestep)
        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1
        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]
        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now
        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction`"
            )
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )
        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat
            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2
            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample
            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None
        prev_sample = sample + derivative * dt
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ):
        '''simple docstring'''
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)
        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]
        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)
        noisy_samples = original_samples + noise * sigma
        return noisy_samples
def __len__( self : Optional[int] ):
'''simple docstring'''
return self.config.num_train_timesteps
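
# The two-phase logic in step() above is Heun's predictor-corrector method: an Euler step
# using the derivative at sigma_hat, then a correction that averages it with the derivative
# at sigma_next (which is also why set_timesteps() repeats every interior sigma/timestep
# twice). A self-contained sketch of that update rule, assuming an epsilon-predicting
# `denoiser` callable (a stand-in, not part of this module):
def _heun_step_sketch(sample, sigma, sigma_next, denoiser):
    pred_original = sample - sigma * denoiser(sample, sigma)
    d_cur = (sample - pred_original) / sigma  # dx/dsigma at the current point
    x_euler = sample + d_cur * (sigma_next - sigma)  # 1st-order (Euler) prediction
    if sigma_next == 0:
        return x_euler  # final step: no second model evaluation is possible
    pred_original_next = x_euler - sigma_next * denoiser(x_euler, sigma_next)
    d_next = (x_euler - pred_original_next) / sigma_next
    return sample + 0.5 * (d_cur + d_next) * (sigma_next - sigma)  # 2nd-order correction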
| 5 | 0 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class ConstraintTest(unittest.TestCase):
"""simple docstring"""
    def test_input_types(self):
        '''simple docstring'''
        constraint_seqs = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(constraint_seqs)
        self.assertTrue(isinstance(dc.token_ids, list))
        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))
        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])
    def test_check_illegal_input(self):
        '''simple docstring'''
        constraint_seqs = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(ValueError):
            DisjunctiveConstraint(constraint_seqs)  # fails here
    def test_example_progression(self):
        '''simple docstring'''
        constraint_seqs = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(constraint_seqs)
        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])
        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])
        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])
    def test_example_progression_unequal_three_mid_and_reset(self):
        '''simple docstring'''
        constraint_seqs = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(constraint_seqs)
        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])
        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])
        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])
        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])
        dc.reset()
        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])
        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])
        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
self.assertTrue(dc.current_seq == [1, 2, 5] )
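
    def _example_constrained_generation(self):
        # Illustrative sketch only (checkpoint name and prompt are placeholder assumptions, and
        # the leading underscore keeps unittest from collecting it): constrained beam search
        # consumes a DisjunctiveConstraint via `generate(constraints=...)`, and any one of the
        # candidate token sequences satisfies the constraint.
        from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

        tokenizer = AutoTokenizer.from_pretrained('t5-small')
        model = AutoModelForSeq2SeqLM.from_pretrained('t5-small')
        constraint = DisjunctiveConstraint(
            [
                tokenizer('rain', add_special_tokens=False).input_ids,
                tokenizer('raining', add_special_tokens=False).input_ids,
            ]
        )
        out = model.generate(
            **tokenizer('summarize: the weather forecast', return_tensors='pt'),
            constraints=[constraint],
            num_beams=4,
        )
        return tokenizer.batch_decode(out, skip_special_tokens=True)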
| 366 |
'''simple docstring'''
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int):
    """simple docstring"""
    simulator = qiskit.Aer.get_backend('aer_simulator')
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(f'''Total count for various states are: {single_qubit_measure(1, 1)}''')
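
# A contrasting sketch (hypothetical helper, not in the original module): the same
# measurement after a Hadamard gate, where the counts split roughly 50/50 between
# '0' and '1' instead of always reading '0'.
def superposition_measure(qubits: int = 1, classical_bits: int = 1):
    simulator = qiskit.Aer.get_backend('aer_simulator')
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    circuit.h(0)  # Hadamard: |0> -> (|0> + |1>) / sqrt(2)
    circuit.measure([0], [0])
    job = qiskit.execute(circuit, simulator, shots=1000)
    return job.result().get_counts(circuit)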
| 5 | 0 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class ModelArguments:
"""simple docstring"""
    output_dir: str = field(
        metadata={'help': 'The output directory where the model will be written.'},
    )
    encoder_model_name_or_path: str = field(
        metadata={
            'help': (
                'The encoder model checkpoint for weights initialization.'
                "Don't set if you want to train an encoder model from scratch."
            )
        },
    )
    decoder_model_name_or_path: str = field(
        metadata={
            'help': (
                'The decoder model checkpoint for weights initialization.'
                "Don't set if you want to train a decoder model from scratch."
            )
        },
    )
    encoder_config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained encoder config name or path if not the same as encoder_model_name'}
    )
    decoder_config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained decoder config name or path if not the same as decoder_model_name'}
    )
def main():
    """simple docstring"""
    parser = HfArgumentParser((ModelArguments,))
    (model_args,) = parser.parse_args_into_dataclasses()
# Load pretrained model and tokenizer
# Use explicit specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name)
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path)
    # Use explicit specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name)
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path)
    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True
    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path,
        decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path,
        encoder_config=encoder_config,
        decoder_config=decoder_config,
    )
    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id
    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id
    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path)
    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path)
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id)
    model.save_pretrained(model_args.output_dir)
    image_processor.save_pretrained(model_args.output_dir)
    tokenizer.save_pretrained(model_args.output_dir)
if __name__ == "__main__":
main()
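
# Example invocation (script name and checkpoints are illustrative assumptions, not
# prescribed values — any compatible vision encoder and causal-LM decoder should work):
#
#   python create_flax_vision_encoder_decoder.py \
#       --output_dir ./vit-gpt2 \
#       --encoder_model_name_or_path google/vit-base-patch16-224-in21k \
#       --decoder_model_name_or_path gpt2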
| 367 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
    def tearDown(self):
'''simple docstring'''
super().tearDown()
gc.collect()
    def test_stable_diffusion_inpaint_pipeline(self):
        '''simple docstring'''
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png'
        )
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png'
        )
        model_id = 'xvjiarui/stable-diffusion-2-inpainting'
        pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        prompt = 'Face of a yellow cat, high resolution, sitting on a park bench'
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        init_image = num_samples * [init_image]
        mask_image = num_samples * [mask_image]
        prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(
            prompt, init_image, mask_image
        )
        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        prompt_ids = shard(prompt_ids)
        processed_masked_images = shard(processed_masked_images)
        processed_masks = shard(processed_masks)
        output = pipeline(
            prompt_ids, processed_masks, processed_masked_images, params, prng_seed, num_inference_steps, jit=True
        )
        images = output.images.reshape(num_samples, 512, 512, 3)
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 5 | 0 |
'''simple docstring'''
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus):
"""simple docstring"""
@require_torch
    def test_offline_mode(self):
        '''simple docstring'''
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
        run = '\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn\'t access internet\")\nsocket.socket = offline_socket\n '
        # Force fetching the files so that we can use the cache
        mname = 'hf-internal-testing/tiny-random-bert'
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task='fill-mask', model=mname)
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run, mock])]
        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
@require_torch
    def test_offline_mode_no_internet(self):
        '''simple docstring'''
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
        run = '\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")\nsocket.socket = offline_socket\n '
        # Force fetching the files so that we can use the cache
        mname = 'hf-internal-testing/tiny-random-bert'
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task='fill-mask', model=mname)
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run, mock])]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
@require_torch
    def test_offline_mode_sharded_checkpoint(self):
        '''simple docstring'''
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
        run = '\nmname = \"hf-internal-testing/tiny-random-bert-sharded\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint(\"success\")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n '
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run])]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())
        # next emulate no network
        cmd = [sys.executable, '-c', '\n'.join([load, mock, run])]
        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
@require_torch
    def test_offline_mode_pipeline_exception(self):
        '''simple docstring'''
        load = '\nfrom transformers import pipeline\n '
        run = '\nmname = \"hf-internal-testing/tiny-random-bert\"\npipe = pipeline(model=mname)\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n '
        env = self.get_env()
        env['TRANSFORMERS_OFFLINE'] = '1'
        cmd = [sys.executable, '-c', '\n'.join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
self.assertEqual(result.returncode ,1 ,result.stderr )
self.assertIn(
'You cannot infer task automatically within `pipeline` when using offline mode' ,result.stderr.decode().replace('\n' ,'' ) ,)
@require_torch
    def test_offline_model_dynamic_model(self):
        '''simple docstring'''
        load = '\nfrom transformers import AutoModel\n '
        run = '\nmname = \"hf-internal-testing/test_dynamic_model\"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint(\"success\")\n '
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run])]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
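
    # What these tests exercise, from the user's side (a sketch; the script path is a
    # placeholder assumption):
    #
    #   TRANSFORMERS_OFFLINE=1 python my_script.py   # cache-only loading, no Hub requests
    #
    # or, equivalently, before importing anything that touches the Hub:
    #
    #   import os
    #   os.environ["TRANSFORMERS_OFFLINE"] = "1"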
| 368 |
'''simple docstring'''
def match_pattern(input_string: str, pattern: str) -> bool:
    """simple docstring"""
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1
    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]
    # since string of zero length match pattern of zero length
    dp[0][0] = 1
    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0
    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == '*' else 0
    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0
    return bool(dp[-1][-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
__lowerCAmelCase = """aab"""
__lowerCAmelCase = """c*a*b"""
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(f'''{input_string} matches the given pattern {pattern}''')
else:
print(f'''{input_string} does not match with the given pattern {pattern}''')
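
# Worked trace for the example above (input_string="aab", pattern="c*a*b"): "c*" matches
# the empty string via dp[0][2] = dp[0][0], "a*" absorbs both 'a' characters, and the final
# 'b' matches 'b', so dp[3][5] == 1 and the match succeeds. A pattern like "c*ab" would
# fail, since its single literal 'a' cannot cover "aa".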
| 5 | 0 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Visual-Attention-Network/van-base": (
        "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
    ),
}


class VanConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "van"
    def __init__(
        self, image_size=224, num_channels=3, patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512], depths=[3, 3, 12, 3], mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2, drop_path_rate=0.0, dropout_rate=0.0, **kwargs,
    ):
        '''simple docstring'''
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
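
# Minimal usage sketch (illustrative; the defaults above already describe the VAN-base
# layout, so the explicit arguments here are redundant and shown only for clarity):
if __name__ == "__main__":
    config = VanConfig(hidden_sizes=[64, 128, 320, 512], depths=[3, 3, 12, 3])
    assert config.model_type == "van"
    print(config)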
| 369 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
"""simple docstring"""
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False
    def setUp(self):
'''simple docstring'''
super().setUp()
        vocab = ['__start__', 'adapt', 'act', 'ap@@', 'te', '__end__', '__unk__']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'a p', 't e</w>', 'ap t</w>', 'a d', 'ad apt</w>', 'a c', 'ac t</w>', '']
        self.special_tokens_map = {'unk_token': '__unk__', 'bos_token': '__start__', 'eos_token': '__end__'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))
    def get_tokenizer(self, **kwargs):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        '''simple docstring'''
        input_text = 'adapt act apte'
        output_text = 'adapt act apte'
        return input_text, output_text
    def test_full_tokenizer(self):
        '''simple docstring'''
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'adapt act apte'
        bpe_tokens = ['adapt', 'act', 'ap@@', 'te']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_special_tokens_small_tok(self):
        '''simple docstring'''
        tok = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M')
        assert tok('sam').input_ids == [1384]
        src_text = 'I am a small frog.'
        encoded = tok([src_text], padding=False, truncation=True)['input_ids']
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."
def __lowercase ( self : Any ):
'''simple docstring'''
        tok = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M')
        src_text = 'I am a small frog .'
        src_text_dot = '.'
        encoded = tok(src_text)['input_ids']
        encoded_dot = tok(src_text_dot)['input_ids']
        assert encoded[-1] == encoded_dot[0]
| 5 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
logger = logging.get_logger(__name__)

BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/resolve/main/config.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/config.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/config.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json""",
}
class BloomConfig(PretrainedConfig):
    """simple docstring"""

    model_type = 'bloom'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'num_hidden_layers': 'n_layer',
        'num_attention_heads': 'n_head',
    }
    def __init__(
        self, vocab_size=250880, hidden_size=64, n_layer=2, n_head=8, layer_norm_epsilon=1e-5,
        initializer_range=0.02, use_cache=True, bos_token_id=1, eos_token_id=2,
        apply_residual_connection_post_layernorm=False, hidden_dropout=0.0,
        attention_dropout=0.0, pretraining_tp=1, slow_but_exact=False, **kwargs,
    ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop('n_embed', None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
class BloomOnnxConfig(OnnxConfigWithPast):
    """simple docstring"""

    torch_onnx_minimum_version = version.parse('1.12')

    def __init__(
        self, config: PretrainedConfig, task: str = 'default',
        patching_specs: List[PatchingSpec] = None, use_past: bool = False,
    ):
        '''simple docstring'''
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, 'pad_token_id', None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        common_inputs = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}})
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs, direction='inputs', inverted_values_shape=True)
            common_inputs['attention_mask'] = {0: 'batch', 1: 'past_sequence + sequence'}
        else:
            common_inputs['attention_mask'] = {0: 'batch', 1: 'sequence'}
        return common_inputs

    @property
    def num_layers(self) -> int:
        '''simple docstring'''
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        '''simple docstring'''
        return self._config.n_head

    @property
    def atol_for_validation(self) -> float:
        '''simple docstring'''
        return 1e-3
    def generate_dummy_inputs(
        self, tokenizer: "PreTrainedTokenizer", batch_size: int = -1, seq_length: int = -1,
        is_pair: bool = False, framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        '''simple docstring'''
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({'input_ids': common_inputs['input_ids']})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch

                batch, seqlen = common_inputs['input_ids'].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs['past_key_values'] = [
                    (torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs['attention_mask'] = common_inputs['attention_mask']
        if self.use_past:
            mask_dtype = ordered_inputs['attention_mask'].dtype
            ordered_inputs['attention_mask'] = torch.cat(
                [ordered_inputs['attention_mask'], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
        return ordered_inputs
    @property
    def default_onnx_opset(self) -> int:
        '''simple docstring'''
        return 13
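
# Shape sketch for the dummy past_key_values built above (an added illustration): BLOOM
# fuses the batch and head dimensions, and keys/values transpose their last two axes
# relative to each other:
#   key:   (batch * n_head, head_dim, past_sequence_length)
#   value: (batch * n_head, past_sequence_length, head_dim)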
| 370 |
'''simple docstring'''
# fmt: off
MORSE_CODE_DICT = {
"""A""": """.-""", """B""": """-...""", """C""": """-.-.""", """D""": """-..""", """E""": """.""", """F""": """..-.""", """G""": """--.""",
"""H""": """....""", """I""": """..""", """J""": """.---""", """K""": """-.-""", """L""": """.-..""", """M""": """--""", """N""": """-.""",
"""O""": """---""", """P""": """.--.""", """Q""": """--.-""", """R""": """.-.""", """S""": """...""", """T""": """-""", """U""": """..-""",
"""V""": """...-""", """W""": """.--""", """X""": """-..-""", """Y""": """-.--""", """Z""": """--..""", """1""": """.----""",
"""2""": """..---""", """3""": """...--""", """4""": """....-""", """5""": """.....""", """6""": """-....""", """7""": """--...""",
"""8""": """---..""", """9""": """----.""", """0""": """-----""", """&""": """.-...""", """@""": """.--.-.""",
""":""": """---...""", """,""": """--..--""", """.""": """.-.-.-""", """'""": """.----.""", """\"""": """.-..-.""",
"""?""": """..--..""", """/""": """-..-.""", """=""": """-...-""", """+""": """.-.-.""", """-""": """-....-""",
"""(""": """-.--.""", """)""": """-.--.-""", """!""": """-.-.--""", """ """: """/"""
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}


def encrypt(message: str) -> str:
    """simple docstring"""
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    """simple docstring"""
    return "".join(REVERSE_DICT[char] for char in message.split())


def main():
    """simple docstring"""
    message = 'Morse code here!'
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)
if __name__ == "__main__":
main()
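
# Quick worked example (values follow directly from MORSE_CODE_DICT above):
#   encrypt('SOS') == '... --- ...'
#   decrypt('... --- ...') == 'SOS'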
| 5 | 0 |
'''simple docstring'''
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester(object):
"""simple docstring"""
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32,
        num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02,
        relative_attention=False, position_biased_input=True, pos_att_type="None",
        num_labels=3, num_choices=4, scope=None,
    ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
'''simple docstring'''
return DebertaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,relative_attention=self.relative_attention ,position_biased_input=self.position_biased_input ,pos_att_type=self.pos_att_type ,)
    def get_pipeline_config(self):
        '''simple docstring'''
        config = self.get_config()
        config.vocab_size = 300
        return config
    def check_loss_output(self, result):
        '''simple docstring'''
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        '''simple docstring'''
        model = DebertaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]
        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])
    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        '''simple docstring'''
        model = DebertaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        '''simple docstring'''
        model = DebertaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaModel,
            "fill-mask": DebertaForMaskedLM,
            "question-answering": DebertaForQuestionAnswering,
            "text-classification": DebertaForSequenceClassification,
            "token-classification": DebertaForTokenClassification,
            "zero-shot": DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
    def setUp(self):
        '''simple docstring'''
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)
    def test_config(self):
'''simple docstring'''
self.config_tester.run_common_tests()
    def test_deberta_model(self):
'''simple docstring'''
_a : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*_a )
    def test_for_sequence_classification(self):
'''simple docstring'''
_a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*_a )
    def test_for_masked_lm(self):
'''simple docstring'''
_a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*_a )
    def test_for_question_answering(self):
'''simple docstring'''
_a : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*_a )
    def test_for_token_classification(self):
'''simple docstring'''
_a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*_a )
@slow
    def test_model_from_pretrained(self):
'''simple docstring'''
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : List[Any] = DebertaModel.from_pretrained(_a )
self.assertIsNotNone(_a )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@unittest.skip(reason='Model not available yet' )
    def test_inference_masked_lm(self):
'''simple docstring'''
pass
@slow
    def test_inference_no_head(self):
        '''simple docstring'''
        model = DebertaModel.from_pretrained('microsoft/deberta-base')
        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
| 371 |
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
"""return_dict""": False,
"""output_hidden_states""": True,
"""output_attentions""": True,
"""torchscript""": True,
"""torch_dtype""": """float16""",
"""use_bfloat16""": True,
"""tf_legacy_loss""": True,
"""pruned_heads""": {"""a""": 1},
"""tie_word_embeddings""": False,
"""is_decoder""": True,
"""cross_attention_hidden_size""": 1_2_8,
"""add_cross_attention""": True,
"""tie_encoder_decoder""": True,
"""max_length""": 5_0,
"""min_length""": 3,
"""do_sample""": True,
"""early_stopping""": True,
"""num_beams""": 3,
"""num_beam_groups""": 3,
"""diversity_penalty""": 0.5,
"""temperature""": 2.0,
"""top_k""": 1_0,
"""top_p""": 0.7,
"""typical_p""": 0.2,
"""repetition_penalty""": 0.8,
"""length_penalty""": 0.8,
"""no_repeat_ngram_size""": 5,
"""encoder_no_repeat_ngram_size""": 5,
"""bad_words_ids""": [1, 2, 3],
"""num_return_sequences""": 3,
"""chunk_size_feed_forward""": 5,
"""output_scores""": True,
"""return_dict_in_generate""": True,
"""forced_bos_token_id""": 2,
"""forced_eos_token_id""": 3,
"""remove_invalid_values""": True,
"""architectures""": ["""BertModel"""],
"""finetuning_task""": """translation""",
"""id2label""": {0: """label"""},
"""label2id""": {"""label""": """0"""},
"""tokenizer_class""": """BertTokenizerFast""",
"""prefix""": """prefix""",
"""bos_token_id""": 6,
"""pad_token_id""": 7,
"""eos_token_id""": 8,
"""sep_token_id""": 9,
"""decoder_start_token_id""": 1_0,
"""exponential_decay_length_penalty""": (5, 1.01),
"""suppress_tokens""": [0, 1],
"""begin_suppress_tokens""": 2,
"""task_specific_params""": {"""translation""": """some_params"""},
"""problem_type""": """regression""",
}
@is_staging_test
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@classmethod
    def setUpClass(cls):
'''simple docstring'''
_a : List[Any] = TOKEN
HfFolder.save_token(_a )
@classmethod
    def tearDownClass(cls):
'''simple docstring'''
try:
delete_repo(token=cls._token ,repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='test-dynamic-config' )
except HTTPError:
pass
    def test_push_to_hub(self):
        '''simple docstring'''
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub('test-config', use_auth_token=self._token)
        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
        # Reset repo
        delete_repo(token=self._token, repo_id='test-config')
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id='test-config', push_to_hub=True, use_auth_token=self._token)
        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_in_organization(self):
        '''simple docstring'''
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub('valid_org/test-config-org', use_auth_token=self._token)
        new_config = BertConfig.from_pretrained('valid_org/test-config-org')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
        # Reset repo
        delete_repo(token=self._token, repo_id='valid_org/test-config-org')
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id='valid_org/test-config-org', push_to_hub=True, use_auth_token=self._token
            )
        new_config = BertConfig.from_pretrained('valid_org/test-config-org')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_dynamic_config(self):
        '''simple docstring'''
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)
        config.push_to_hub('test-dynamic-config', use_auth_token=self._token)
        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {'AutoConfig': 'custom_configuration.CustomConfig'})
        new_config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config", trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, 'CustomConfig')
        self.assertEqual(new_config.attribute, 42)
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
    def test_config_from_string(self):
        '''simple docstring'''
        c = GPTaConfig()
        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + 'foo'  # str
        c.update_from_string(
            f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}"
        )
        self.assertEqual(n_embd, c.n_embd, 'mismatch for key: n_embd')
        self.assertEqual(resid_pdrop, c.resid_pdrop, 'mismatch for key: resid_pdrop')
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, 'mismatch for key: scale_attn_weights')
        self.assertEqual(summary_type, c.summary_type, 'mismatch for key: summary_type')
    def test_config_common_kwargs_is_complete(self):
        '''simple docstring'''
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version']
        )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                'The following keys are set with the default values in'
                ' `test_configuration_common.config_common_kwargs` pick another value for them:'
                f" {', '.join(keys_with_defaults)}."
            )
    def test_from_pretrained_subfolder(self):
        '''simple docstring'''
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder')
        config = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder', subfolder='bert')
        self.assertIsNotNone(config)
def __lowercase ( self : List[Any] ):
'''simple docstring'''
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert')
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('requests.Session.request', return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert')
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_url(self):
'''simple docstring'''
_a : Optional[int] = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
    def test_local_versioning(self):
        '''simple docstring'''
        configuration = AutoConfig.from_pretrained('bert-base-cased')
        configuration.configuration_files = ['config.4.0.0.json']
        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, 'config.4.0.0.json'), 'w'))
            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)
            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ['config.42.0.0.json']
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, 'config.4.0.0.json'), os.path.join(tmp_dir, 'config.42.0.0.json'))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)
    def test_repo_versioning_before(self):
        '''simple docstring'''
        repo = 'hf-internal-testing/test-two-configs'
        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = 'v4.0.0'
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True
        )
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})
        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = 'v3.0.0'
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
| 5 | 0 |
'''simple docstring'''
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)
class UpperCAmelCase__ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def __init__( self : Any ,_a : int ):
'''simple docstring'''
super().__init__()
_a : Tuple = nn.ModuleList(_snake_case )
def __lowercase ( self : int ,_a : Tuple ,_a : str ,_a : List[str] ,_a : Union[str, Any] ,_a : Optional[int] ,_a : str = None ,_a : int = None ,_a : Union[str, Any] = None ,_a : str = None ,_a : int = False ,_a : Optional[Any] = True ,):
'''simple docstring'''
for i, (image, scale, controlnet) in enumerate(zip(_snake_case ,_snake_case ,self.nets ) ):
_a : List[Any] = controlnet(
_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,)
# merge samples
if i == 0:
                _a, _a : Optional[int] = down_samples, mid_sample
else:
_a : List[Any] = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(_snake_case ,_snake_case )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def __lowercase ( self : Optional[int] ,_a : Tuple ,_a : int = True ,_a : Dict = None ,_a : Any = False ,_a : Optional[int] = None ,):
'''simple docstring'''
_a : Union[str, Any] = 0
_a : Optional[int] = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
_snake_case ,is_main_process=_snake_case ,save_function=_snake_case ,safe_serialization=_snake_case ,variant=_snake_case ,)
idx += 1
_a : Union[str, Any] = model_path_to_save + F"""_{idx}"""
@classmethod
def __lowercase ( cls : List[Any] ,_a : Optional[int] ,**_a : Tuple ):
'''simple docstring'''
_a : List[str] = 0
_a : Dict = []
# load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
_a : int = pretrained_model_path
while os.path.isdir(_snake_case ):
_a : Union[str, Any] = ControlNetModel.from_pretrained(_snake_case ,**_snake_case )
controlnets.append(_snake_case )
idx += 1
_a : Union[str, Any] = pretrained_model_path + F"""_{idx}"""
logger.info(F"""{len(_snake_case )} controlnets loaded from {pretrained_model_path}.""" )
if len(_snake_case ) == 0:
raise ValueError(
F"""No ControlNets found under {os.path.dirname(_snake_case )}. Expected at least {pretrained_model_path + '_0'}.""" )
return cls(_snake_case )
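# --- Illustrative usage (not part of the source) --------------------------------------
# A minimal sketch of the directory-suffix convention implemented by the save/load
# methods above, assuming this is diffusers' `MultiControlNetModel` (identifiers in
# this dump are obfuscated) and using example hub ids:
#
#     from diffusers import ControlNetModel
#     from diffusers.pipelines.controlnet import MultiControlNetModel
#
#     nets = [
#         ControlNetModel.from_pretrained('lllyasviel/sd-controlnet-canny'),
#         ControlNetModel.from_pretrained('lllyasviel/sd-controlnet-depth'),
#     ]
#     multi = MultiControlNetModel(nets)
#     multi.save_pretrained('./my_controlnets')   # writes ./my_controlnets and ./my_controlnets_1
#     multi = MultiControlNetModel.from_pretrained('./my_controlnets')  # walks the _1, _2, ... suffixes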
| 350 |
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
__lowerCAmelCase = {
"""169M""": 1_2,
"""430M""": 2_4,
"""1B5""": 2_4,
"""3B""": 3_2,
"""7B""": 3_2,
"""14B""": 4_0,
}
__lowerCAmelCase = {
"""169M""": 7_6_8,
"""430M""": 1_0_2_4,
"""1B5""": 2_0_4_8,
"""3B""": 2_5_6_0,
"""7B""": 4_0_9_6,
"""14B""": 5_1_2_0,
}
def UpperCAmelCase_ (__a : Dict ):
"""simple docstring"""
_a : List[Any] = list(state_dict.keys() )
for name in state_dict_keys:
_a : List[Any] = state_dict.pop(__a )
# emb -> embedding
if name.startswith('emb.' ):
_a : List[str] = name.replace('emb.' , 'embeddings.' )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith('blocks.0.ln0' ):
_a : Dict = name.replace('blocks.0.ln0' , 'blocks.0.pre_ln' )
# att -> attention
_a : int = re.sub(R'blocks\.(\d+)\.att' , R'blocks.\1.attention' , __a )
# ffn -> feed_forward
_a : str = re.sub(R'blocks\.(\d+)\.ffn' , R'blocks.\1.feed_forward' , __a )
# time_mix_k -> time_mix_key and reshape
if name.endswith('.time_mix_k' ):
_a : Any = name.replace('.time_mix_k' , '.time_mix_key' )
# time_mix_v -> time_mix_value and reshape
if name.endswith('.time_mix_v' ):
_a : int = name.replace('.time_mix_v' , '.time_mix_value' )
    # time_mix_r -> time_mix_receptance and reshape
if name.endswith('.time_mix_r' ):
_a : Tuple = name.replace('.time_mix_r' , '.time_mix_receptance' )
if name != "head.weight":
_a : Tuple = 'rwkv.' + name
_a : List[Any] = weight
return state_dict
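# --- Worked examples of the renaming above (not part of the source) --------------------
#     emb.weight               -> rwkv.embeddings.weight
#     blocks.0.ln0.weight      -> rwkv.blocks.0.pre_ln.weight
#     blocks.3.att.time_mix_k  -> rwkv.blocks.3.attention.time_mix_key
#     blocks.3.ffn.time_mix_r  -> rwkv.blocks.3.feed_forward.time_mix_receptance
#     head.weight              -> head.weight   (the only key left without the 'rwkv.' prefix)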
def UpperCAmelCase_ (__a : Tuple , __a : Union[str, Any] , __a : List[str] , __a : str=None , __a : List[str]=None , __a : int=False , __a : int=None ):
"""simple docstring"""
if tokenizer_file is None:
print('No `--tokenizer_file` provided, we will use the default tokenizer.' )
_a : List[Any] = 5_0_2_7_7
_a : Optional[Any] = AutoTokenizer.from_pretrained('EleutherAI/gpt-neox-20b' )
else:
_a : Optional[Any] = PreTrainedTokenizerFast(tokenizer_file=__a )
_a : List[Any] = len(__a )
tokenizer.save_pretrained(__a )
# 2. Build the config
_a : List[str] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
_a : str = candidate
break
if size is None:
raise ValueError('Could not infer the size, please provide it with the `--size` argument.' )
if size not in possible_sizes:
raise ValueError(f"""`size` should be one of {possible_sizes}, got {size}.""" )
_a : str = RwkvConfig(
vocab_size=__a , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(__a )
# 3. Download model file then convert state_dict
_a : Tuple = hf_hub_download(__a , __a )
_a : Optional[int] = torch.load(__a , map_location='cpu' )
_a : Dict = convert_state_dict(__a )
# 4. Split in shards and save
_a, _a : List[Any] = shard_checkpoint(__a )
for shard_file, shard in shards.items():
torch.save(__a , os.path.join(__a , __a ) )
if index is not None:
_a : Dict = os.path.join(__a , __a )
# Save the index as well
with open(__a , 'w' , encoding='utf-8' ) as f:
_a : List[Any] = json.dumps(__a , indent=2 , sort_keys=__a ) + '\n'
f.write(__a )
    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        'Cleaning up shards. This may fail with an OOM error; if this is the case don\'t worry, you still have converted the model.' )
_a : List[Any] = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
_a : Optional[Any] = torch.load(os.path.join(__a , __a ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(__a , __a ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError('Please provide a `model_name` to push the model to the Hub.' )
_a : List[str] = AutoModelForCausalLM.from_pretrained(__a )
model.push_to_hub(__a , max_shard_size='2GB' )
tokenizer.push_to_hub(__a )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--repo_id""", default=None, type=str, required=True, help="""Repo ID from which to pull the checkpoint."""
)
parser.add_argument(
"""--checkpoint_file""", default=None, type=str, required=True, help="""Name of the checkpoint file in the repo."""
)
parser.add_argument(
"""--output_dir""", default=None, type=str, required=True, help="""Where to save the converted model."""
)
parser.add_argument(
"""--tokenizer_file""",
default=None,
type=str,
help="""Path to the tokenizer file to use (if not provided, only the model is converted).""",
)
parser.add_argument(
"""--size""",
default=None,
type=str,
help="""Size of the model. Will be inferred from the `checkpoint_file` if not passed.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Push to the Hub the converted model.""",
)
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""Name of the pushed model on the Hub, including the username / organization.""",
)
__lowerCAmelCase = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 5 | 0 |
'''simple docstring'''
def UpperCAmelCase_ (__a : int , __a : int ):
"""simple docstring"""
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
raise ValueError('iterations must be defined as integers' )
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ) or not number >= 1:
raise ValueError(
            'starting number must be an integer and be more than 0' )
if not iterations >= 1:
raise ValueError('Iterations must be done more than 0 times to play FizzBuzz' )
_a : List[Any] = ''''''
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(UpperCamelCase__ )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
import doctest
doctest.testmod()
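# --- Worked example (not part of the source) -------------------------------------------
# Assuming the first argument is the starting number and the second the iteration
# limit (as the loop suggests), playing up to 15 returns, with a trailing space
# after every entry:
#     UpperCAmelCase_(1, 15)
#     -> '1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz '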
| 351 |
'''simple docstring'''
import comet # From: unbabel-comet
import torch
import datasets
__lowerCAmelCase = datasets.logging.get_logger(__name__)
__lowerCAmelCase = """\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel's Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = \"{COMET}: A Neural Framework for {MT} Evaluation\",
author = \"Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon\",
booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",
month = nov,
year = \"2020\",
address = \"Online\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",
pages = \"2685--2702\",
}
"""
__lowerCAmelCase = """\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task, achieving SOTA in that year's competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
"""
__lowerCAmelCase = """
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt20-comet-da` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric('comet')
>>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use
>>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]
>>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]
>>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results[\"scores\"]])
[0.19, 0.92]
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase__ ( datasets.Metric ):
"""simple docstring"""
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,homepage='https://unbabel.github.io/COMET/html/index.html' ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'sources': datasets.Value('string' ,id='sequence' ),
'predictions': datasets.Value('string' ,id='sequence' ),
'references': datasets.Value('string' ,id='sequence' ),
} ) ,codebase_urls=['https://github.com/Unbabel/COMET'] ,reference_urls=[
'https://github.com/Unbabel/COMET',
'https://www.aclweb.org/anthology/2020.emnlp-main.213/',
        'http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf',
] ,)
def __lowercase ( self : int ,_a : int ):
'''simple docstring'''
if self.config_name == "default":
_a : List[Any] = comet.load_from_checkpoint(comet.download_model('wmt20-comet-da' ) )
else:
_a : List[str] = comet.load_from_checkpoint(comet.download_model(self.config_name ) )
def __lowercase ( self : Tuple ,_a : List[Any] ,_a : Dict ,_a : Optional[Any] ,_a : List[str]=None ,_a : Tuple=False ):
'''simple docstring'''
if gpus is None:
_a : str = 1 if torch.cuda.is_available() else 0
_a : Optional[Any] = {'src': sources, 'mt': predictions, 'ref': references}
_a : Optional[Any] = [dict(zip(_a ,_a ) ) for t in zip(*data.values() )]
_a, _a : Tuple = self.scorer.predict(_a ,gpus=_a ,progress_bar=_a )
return {"mean_score": mean_score, "scores": scores}
| 5 | 0 |
'''simple docstring'''
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
SquadVaProcessor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 352 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowerCAmelCase = {
"""configuration_squeezebert""": [
"""SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SqueezeBertConfig""",
"""SqueezeBertOnnxConfig""",
],
"""tokenization_squeezebert""": ["""SqueezeBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ["""SqueezeBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
"""SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SqueezeBertForMaskedLM""",
"""SqueezeBertForMultipleChoice""",
"""SqueezeBertForQuestionAnswering""",
"""SqueezeBertForSequenceClassification""",
"""SqueezeBertForTokenClassification""",
"""SqueezeBertModel""",
"""SqueezeBertModule""",
"""SqueezeBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 5 | 0 |
'''simple docstring'''
def UpperCAmelCase_ (__a : int ):
"""simple docstring"""
if a < 0:
raise ValueError('Input value must be a positive integer' )
    elif not isinstance(_lowerCAmelCase , int ):
raise TypeError('Input value must be a \'int\' type' )
return bin(_lowerCAmelCase ).count('1' )
if __name__ == "__main__":
import doctest
doctest.testmod()
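# --- Worked example (not part of the source) -------------------------------------------
# 25 == 0b11001, so the function counts three set bits:
#     UpperCAmelCase_(25) -> bin(25).count('1') -> 3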
| 353 |
'''simple docstring'''
import sys
def UpperCAmelCase_ (__a : List[str] ):
"""simple docstring"""
_a : List[str] = len(__a )
_a : Dict = [[0 for x in range(__a )] for x in range(__a )]
_a : Union[str, Any] = [[0 for x in range(__a )] for x in range(__a )]
for chain_length in range(2 , __a ):
for a in range(1 , n - chain_length + 1 ):
_a : Tuple = a + chain_length - 1
_a : Any = sys.maxsize
for c in range(__a , __a ):
_a : Optional[Any] = (
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
_a : Dict = cost
_a : Any = c
return matrix, sol
def UpperCAmelCase_ (__a : Tuple , __a : List[str] , __a : Dict ):
"""simple docstring"""
if i == j:
print('A' + str(__a ) , end=' ' )
else:
print('(' , end=' ' )
        print_optimal_solution(__a , __a , optimal_solution[i][j] )
        print_optimal_solution(__a , optimal_solution[i][j] + 1 , __a )
print(')' , end=' ' )
def UpperCAmelCase_ ():
"""simple docstring"""
_a : Any = [3_0, 3_5, 1_5, 5, 1_0, 2_0, 2_5]
_a : Any = len(__a )
# Size of matrix created from above array will be
# 30*35 35*15 15*5 5*10 10*20 20*25
_a, _a : Union[str, Any] = matrix_chain_order(__a )
print('No. of Operation required: ' + str(matrix[1][n - 1] ) )
    print_optimal_solution(__a , 1 , n - 1 )
if __name__ == "__main__":
main()
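# --- Expected output (not part of the source) ------------------------------------------
# The sample dimensions above are the classic CLRS matrix-chain instance, for which
# the DP yields 15125 scalar multiplications and the parenthesization below:
#     No. of Operation required: 15125
#     ( ( A1 ( A2 A3 ) ) ( ( A4 A5 ) A6 ) )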
| 5 | 0 |
'''simple docstring'''
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('repo_id' , ['canonical_dataset_name', 'org-name/dataset-name'] )
@pytest.mark.parametrize('path' , ['filename.csv', 'filename with blanks.csv'] )
@pytest.mark.parametrize('revision' , [None, 'v2'] )
def UpperCAmelCase_ (__a : str , __a : Union[str, Any] , __a : Tuple ):
"""simple docstring"""
_a : Tuple = hf_hub_url(repo_id=__a , path=__a , revision=__a )
assert url == f"""https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(__a )}"""
| 354 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def UpperCAmelCase_ (__a : Optional[Any] ):
"""simple docstring"""
_a : int = FileLock(str(tmpdir / 'foo.lock' ) )
_a : List[Any] = FileLock(str(tmpdir / 'foo.lock' ) )
_a : Any = 0.01
with locka.acquire():
with pytest.raises(__a ):
_a : int = time.time()
locka.acquire(__a )
assert time.time() - _start > timeout
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
_a : Dict = 'a' * 1_0_0_0 + '.lock'
_a : int = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith('.lock' )
assert not locka._lock_file.endswith(__a )
assert len(os.path.basename(locka._lock_file ) ) <= 2_5_5
_a : Dict = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(__a ):
locka.acquire(0 )
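# --- Illustrative usage (a sketch, not part of the test file) ---------------------------
# The acquire/timeout pattern these tests exercise, in plain form:
def _filelock_demo(path: str) -> None:
    lock = FileLock(path, timeout=0.05)
    try:
        with lock.acquire():
            pass # exclusive section; a concurrent FileLock on `path` hits the 0.05s timeout
    except Timeout:
        print(f'another process holds {path}')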
| 5 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {'vocab_file': 'spiece.model'}
__lowerCAmelCase = {
'vocab_file': {
'bert_for_seq_generation': (
'https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model'
),
}
}
__lowerCAmelCase = {'bert_for_seq_generation': 5_1_2}
class UpperCAmelCase__ ( _lowerCamelCase ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = VOCAB_FILES_NAMES
__UpperCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : int = []
__UpperCAmelCase : int = ['''input_ids''', '''attention_mask''']
def __init__( self : Any ,_a : str ,_a : Optional[Any]="<s>" ,_a : Any="</s>" ,_a : Optional[Any]="<unk>" ,_a : List[Any]="<pad>" ,_a : List[str]="<::::>" ,_a : Optional[Dict[str, Any]] = None ,**_a : Optional[int] ,):
'''simple docstring'''
_a : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
        # Pass the special tokens on to the superclass
super().__init__(
bos_token=lowercase_ ,eos_token=lowercase_ ,unk_token=lowercase_ ,pad_token=lowercase_ ,sep_token=lowercase_ ,sp_model_kwargs=self.sp_model_kwargs ,**lowercase_ ,)
_a : Optional[int] = vocab_file
_a : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowercase_ )
@property
def __lowercase ( self : int ):
'''simple docstring'''
return self.sp_model.get_piece_size()
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : List[str] = {self.convert_ids_to_tokens(lowercase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[int] ):
'''simple docstring'''
_a : Tuple = self.__dict__.copy()
_a : Optional[int] = None
return state
def __setstate__( self : Any ,_a : Optional[int] ):
'''simple docstring'''
_a : Dict = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
_a : Tuple = {}
_a : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowercase ( self : Any ,_a : str ):
'''simple docstring'''
return self.sp_model.encode(lowercase_ ,out_type=lowercase_ )
def __lowercase ( self : Optional[int] ,_a : Union[str, Any] ):
'''simple docstring'''
return self.sp_model.piece_to_id(lowercase_ )
def __lowercase ( self : Dict ,_a : str ):
'''simple docstring'''
_a : int = self.sp_model.IdToPiece(lowercase_ )
return token
def __lowercase ( self : Optional[int] ,_a : List[Any] ):
'''simple docstring'''
_a : Dict = []
_a : Tuple = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(lowercase_ ) + token
_a : Dict = []
else:
current_sub_tokens.append(lowercase_ )
out_string += self.sp_model.decode(lowercase_ )
return out_string.strip()
def __lowercase ( self : List[str] ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(lowercase_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_a : Dict = os.path.join(
lowercase_ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,lowercase_ )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase_ ,'wb' ) as fi:
_a : List[Any] = self.sp_model.serialized_model_proto()
fi.write(lowercase_ )
return (out_vocab_file,)
| 355 |
'''simple docstring'''
def UpperCAmelCase_ (__a : int = 1_0**1_2 ):
"""simple docstring"""
_a : List[str] = 1
_a : Optional[int] = 0
_a : Any = 1
_a : List[str] = 1
while numerator <= 2 * min_total - 1:
prev_numerator += 2 * numerator
numerator += 2 * prev_numerator
prev_denominator += 2 * denominator
denominator += 2 * prev_denominator
return (denominator + 1) // 2
if __name__ == "__main__":
print(f'''{solution() = }''')
| 5 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
def UpperCAmelCase_ (__a : list , __a : list ):
"""simple docstring"""
if len(lowercase_ ) != 2 or len(a[0] ) != 2 or len(lowercase_ ) != 2 or len(b[0] ) != 2:
raise Exception('Matrices are not 2x2' )
_a : Tuple = [
[a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
[a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
]
return new_matrix
def UpperCAmelCase_ (__a : list , __a : list ):
"""simple docstring"""
return [
[matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(lowercase_ ) )
]
def UpperCAmelCase_ (__a : list , __a : list ):
"""simple docstring"""
return [
[matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(lowercase_ ) )
]
def UpperCAmelCase_ (__a : list ):
"""simple docstring"""
if len(lowercase_ ) % 2 != 0 or len(a[0] ) % 2 != 0:
raise Exception('Odd matrices are not supported!' )
_a : int = len(lowercase_ )
_a : Union[str, Any] = matrix_length // 2
_a : Optional[Any] = [[a[i][j] for j in range(lowercase_ , lowercase_ )] for i in range(lowercase_ )]
_a : Optional[Any] = [
[a[i][j] for j in range(lowercase_ , lowercase_ )] for i in range(lowercase_ , lowercase_ )
]
_a : Union[str, Any] = [[a[i][j] for j in range(lowercase_ )] for i in range(lowercase_ )]
_a : Any = [[a[i][j] for j in range(lowercase_ )] for i in range(lowercase_ , lowercase_ )]
return top_left, top_right, bot_left, bot_right
def UpperCAmelCase_ (__a : list ):
"""simple docstring"""
return len(lowercase_ ), len(matrix[0] )
def UpperCAmelCase_ (__a : list ):
"""simple docstring"""
print('\n'.join(str(lowercase_ ) for line in matrix ) )
def UpperCAmelCase_ (__a : list , __a : list ):
"""simple docstring"""
if matrix_dimensions(lowercase_ ) == (2, 2):
return default_matrix_multiplication(lowercase_ , lowercase_ )
_a, _a, _a, _a : Any = split_matrix(lowercase_ )
_a, _a, _a, _a : List[str] = split_matrix(lowercase_ )
_a : int = actual_strassen(lowercase_ , matrix_subtraction(lowercase_ , lowercase_ ) )
_a : Union[str, Any] = actual_strassen(matrix_addition(lowercase_ , lowercase_ ) , lowercase_ )
_a : Any = actual_strassen(matrix_addition(lowercase_ , lowercase_ ) , lowercase_ )
_a : Any = actual_strassen(lowercase_ , matrix_subtraction(lowercase_ , lowercase_ ) )
_a : Optional[int] = actual_strassen(matrix_addition(lowercase_ , lowercase_ ) , matrix_addition(lowercase_ , lowercase_ ) )
_a : Optional[Any] = actual_strassen(matrix_subtraction(lowercase_ , lowercase_ ) , matrix_addition(lowercase_ , lowercase_ ) )
_a : List[str] = actual_strassen(matrix_subtraction(lowercase_ , lowercase_ ) , matrix_addition(lowercase_ , lowercase_ ) )
_a : List[Any] = matrix_addition(matrix_subtraction(matrix_addition(lowercase_ , lowercase_ ) , lowercase_ ) , lowercase_ )
_a : Optional[int] = matrix_addition(lowercase_ , lowercase_ )
_a : List[Any] = matrix_addition(lowercase_ , lowercase_ )
_a : str = matrix_subtraction(matrix_subtraction(matrix_addition(lowercase_ , lowercase_ ) , lowercase_ ) , lowercase_ )
# construct the new matrix from our 4 quadrants
_a : Tuple = []
for i in range(len(lowercase_ ) ):
new_matrix.append(top_left[i] + top_right[i] )
for i in range(len(lowercase_ ) ):
new_matrix.append(bot_left[i] + bot_right[i] )
return new_matrix
def UpperCAmelCase_ (__a : list , __a : list ):
"""simple docstring"""
if matrix_dimensions(lowercase_ )[1] != matrix_dimensions(lowercase_ )[0]:
_a : Dict = (
'Unable to multiply these matrices, please check the dimensions.\n'
f"""Matrix A: {matrixa}\n"""
f"""Matrix B: {matrixa}"""
)
raise Exception(lowercase_ )
_a : Optional[Any] = matrix_dimensions(lowercase_ )
_a : Optional[int] = matrix_dimensions(lowercase_ )
if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]:
return [matrixa, matrixa]
_a : Union[str, Any] = max(*lowercase_ , *lowercase_ )
_a : int = int(math.pow(2 , math.ceil(math.loga(lowercase_ ) ) ) )
_a : Union[str, Any] = matrixa
_a : Tuple = matrixa
# Adding zeros to the matrices so that the arrays dimensions are the same and also
# power of 2
for i in range(0 , lowercase_ ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , lowercase_ ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
if i < dimensiona[0]:
for _ in range(dimensiona[1] , lowercase_ ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
_a : Optional[Any] = actual_strassen(lowercase_ , lowercase_ )
# Removing the additional zeros
for i in range(0 , lowercase_ ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , lowercase_ ):
final_matrix[i].pop()
else:
final_matrix.pop()
return final_matrix
if __name__ == "__main__":
__lowerCAmelCase = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
__lowerCAmelCase = [[0, 2, 1, 1], [1_6, 2, 3, 3], [2, 2, 7, 7], [1_3, 1_1, 2_2, 4]]
print(strassen(matrixa, matrixa))
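# --- Reference (not part of the source): the textbook Strassen identities ---------------
# With A and B split into quadrants A11..A22 and B11..B22, the seven products are
#     M1 = (A11 + A22)(B11 + B22)      M5 = (A11 + A12) B22
#     M2 = (A21 + A22) B11             M6 = (A21 - A11)(B11 + B12)
#     M3 = A11 (B12 - B22)             M7 = (A12 - A22)(B21 + B22)
#     M4 = A22 (B21 - B11)
# and the result quadrants are
#     C11 = M1 + M4 - M5 + M7          C12 = M3 + M5
#     C21 = M2 + M4                    C22 = M1 - M2 + M3 + M6
# `actual_strassen` above computes a scheme of this shape; with its argument names
# obfuscated, the exact mapping of t1..t7 to M1..M7 is not recoverable from this dump.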
| 356 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
__lowerCAmelCase = {
"""vocab_file""": {"""mobilebert-uncased""": """https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"""},
"""tokenizer_file""": {
"""mobilebert-uncased""": """https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"""
},
}
__lowerCAmelCase = {"""mobilebert-uncased""": 5_1_2}
__lowerCAmelCase = {}
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : Any = VOCAB_FILES_NAMES
__UpperCAmelCase : int = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Tuple = PRETRAINED_INIT_CONFIGURATION
__UpperCAmelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : Optional[Any] = MobileBertTokenizer
def __init__( self : Dict ,_a : List[Any]=None ,_a : Optional[Any]=None ,_a : Union[str, Any]=True ,_a : Dict="[UNK]" ,_a : Union[str, Any]="[SEP]" ,_a : Any="[PAD]" ,_a : Optional[int]="[CLS]" ,_a : Optional[Any]="[MASK]" ,_a : Dict=True ,_a : Any=None ,**_a : Optional[Any] ,):
'''simple docstring'''
super().__init__(
_a ,tokenizer_file=_a ,do_lower_case=_a ,unk_token=_a ,sep_token=_a ,pad_token=_a ,cls_token=_a ,mask_token=_a ,tokenize_chinese_chars=_a ,strip_accents=_a ,**_a ,)
_a : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' ,_a ) != do_lower_case
or normalizer_state.get('strip_accents' ,_a ) != strip_accents
or normalizer_state.get('handle_chinese_chars' ,_a ) != tokenize_chinese_chars
):
_a : Optional[Any] = getattr(_a ,normalizer_state.pop('type' ) )
_a : Dict = do_lower_case
_a : str = strip_accents
_a : Tuple = tokenize_chinese_chars
_a : Optional[Any] = normalizer_class(**_a )
_a : str = do_lower_case
def __lowercase ( self : Tuple ,_a : Union[str, Any] ,_a : List[str]=None ):
'''simple docstring'''
_a : Tuple = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __lowercase ( self : List[str] ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
_a : List[str] = [self.sep_token_id]
_a : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowercase ( self : Union[str, Any] ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
_a : int = self._tokenizer.model.save(_a ,name=_a )
return tuple(_a )
| 5 | 0 |
'''simple docstring'''
__lowerCAmelCase = {
'''meter''': '''m''',
'''kilometer''': '''km''',
'''megametre''': '''Mm''',
'''gigametre''': '''Gm''',
'''terametre''': '''Tm''',
'''petametre''': '''Pm''',
'''exametre''': '''Em''',
'''zettametre''': '''Zm''',
'''yottametre''': '''Ym''',
}
# Exponent of the factor(meter)
__lowerCAmelCase = {
'''m''': 0,
'''km''': 3,
'''Mm''': 6,
'''Gm''': 9,
'''Tm''': 1_2,
'''Pm''': 1_5,
'''Em''': 1_8,
'''Zm''': 2_1,
'''Ym''': 2_4,
}
def UpperCAmelCase_ (__a : List[Any] , __a : List[str] , __a : str ):
"""simple docstring"""
_a : str = from_type.lower().strip('s' )
_a : Dict = to_type.lower().strip('s' )
_a : str = UNIT_SYMBOL.get(A_ , A_ )
_a : str = UNIT_SYMBOL.get(A_ , A_ )
if from_sanitized not in METRIC_CONVERSION:
_a : List[Any] = (
f"""Invalid \'from_type\' value: {from_type!r}.\n"""
f"""Conversion abbreviations are: {', '.join(A_ )}"""
)
raise ValueError(A_ )
if to_sanitized not in METRIC_CONVERSION:
_a : Tuple = (
f"""Invalid \'to_type\' value: {to_type!r}.\n"""
f"""Conversion abbreviations are: {', '.join(A_ )}"""
)
raise ValueError(A_ )
_a : Optional[Any] = METRIC_CONVERSION[from_sanitized]
_a : Dict = METRIC_CONVERSION[to_sanitized]
_a : Optional[Any] = 1
if from_exponent > to_exponent:
_a : Tuple = from_exponent - to_exponent
else:
_a : Optional[int] = -(to_exponent - from_exponent)
return value * pow(1_0 , A_ )
if __name__ == "__main__":
from doctest import testmod
testmod()
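# --- Worked examples (not part of the source) --------------------------------------------
# The conversion is value * 10 ** (from_exponent - to_exponent), e.g.:
#     5 kilometers -> meters:     5 * 10 ** (3 - 0) == 5000
#     250 meters   -> kilometers: 250 * 10 ** (0 - 3) == 0.25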
| 357 |
'''simple docstring'''
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
_a : List[Any] = 0
    # if input_string is "aba" then new_input_string becomes "a|b|a"
_a : Optional[int] = ''
_a : List[str] = ''
# append each character + "|" in new_string for range(0, length-1)
for i in input_string[: len(__a ) - 1]:
new_input_string += i + "|"
# append last character
new_input_string += input_string[-1]
    # we will store the start and end of the previously found furthest-ending
    # palindromic substring
_a, _a : Optional[int] = 0, 0
# length[i] shows the length of palindromic substring with center i
_a : Optional[Any] = [1 for i in range(len(__a ) )]
    # for each character in new_string find the corresponding palindromic string
_a : Dict = 0
for j in range(len(__a ) ):
_a : Dict = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
while (
j - k >= 0
and j + k < len(__a )
and new_input_string[k + j] == new_input_string[j - k]
):
k += 1
_a : Optional[int] = 2 * k - 1
        # does this palindrome end after the previously explored end (that is, r)?
        # if yes, update r to the last index of this palindrome
if j + k - 1 > r:
_a : str = j - k + 1 # noqa: E741
_a : Any = j + k - 1
# update max_length and start position
if max_length < length[j]:
_a : Union[str, Any] = length[j]
_a : List[str] = j
# create that string
_a : Tuple = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
for i in s:
if i != "|":
output_string += i
return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
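# --- Worked examples (not part of the source) --------------------------------------------
# Manacher's algorithm above returns the longest palindromic substring, e.g.:
#     UpperCAmelCase_('forgeeksskeegfor') -> 'geeksskeeg'
#     UpperCAmelCase_('abb')              -> 'bb'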
| 5 | 0 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
"""Intel/dpt-large""": """https://huggingface.co/Intel/dpt-large/resolve/main/config.json""",
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : List[Any] = '''dpt'''
def __init__( self : Optional[int] ,_a : Union[str, Any]=768 ,_a : int=12 ,_a : str=12 ,_a : Union[str, Any]=3072 ,_a : int="gelu" ,_a : Dict=0.0 ,_a : Optional[Any]=0.0 ,_a : Optional[Any]=0.02 ,_a : List[str]=1E-12 ,_a : int=384 ,_a : int=16 ,_a : Optional[Any]=3 ,_a : int=False ,_a : str=True ,_a : List[Any]=[2, 5, 8, 11] ,_a : List[Any]="project" ,_a : List[Any]=[4, 2, 1, 0.5] ,_a : List[str]=[96, 192, 384, 768] ,_a : Optional[int]=256 ,_a : Dict=-1 ,_a : int=False ,_a : Union[str, Any]=True ,_a : Optional[int]=0.4 ,_a : str=255 ,_a : Dict=0.1 ,_a : List[Any]=[1, 1024, 24, 24] ,_a : Optional[Any]=[0, 1] ,_a : Dict=None ,**_a : List[Any] ,):
'''simple docstring'''
super().__init__(**__A )
_a : Tuple = hidden_size
_a : Optional[int] = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info('Initializing the config with a `BiT` backbone.' )
_a : int = {
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
}
_a : int = BitConfig(**__A )
elif isinstance(__A ,__A ):
logger.info('Initializing the config with a `BiT` backbone.' )
_a : Tuple = BitConfig(**__A )
elif isinstance(__A ,__A ):
_a : List[Any] = backbone_config
else:
raise ValueError(
F"""backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.""" )
_a : Dict = backbone_featmap_shape
_a : Optional[Any] = neck_ignore_stages
if readout_type != "project":
raise ValueError('Readout type must be \'project\' when using `DPT-hybrid` mode.' )
else:
_a : Optional[int] = None
_a : str = None
_a : int = []
_a : Tuple = num_hidden_layers
_a : Tuple = num_attention_heads
_a : Any = intermediate_size
_a : Optional[int] = hidden_act
_a : List[Any] = hidden_dropout_prob
_a : Any = attention_probs_dropout_prob
_a : Dict = initializer_range
_a : List[str] = layer_norm_eps
_a : str = image_size
_a : List[str] = patch_size
_a : Union[str, Any] = num_channels
_a : int = qkv_bias
_a : Union[str, Any] = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError('Readout_type must be one of [\'ignore\', \'add\', \'project\']' )
_a : Optional[Any] = readout_type
_a : int = reassemble_factors
_a : Any = neck_hidden_sizes
_a : Union[str, Any] = fusion_hidden_size
_a : int = head_in_index
_a : Dict = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
_a : int = use_auxiliary_head
_a : int = auxiliary_loss_weight
_a : Optional[int] = semantic_loss_ignore_index
_a : Union[str, Any] = semantic_classifier_dropout
def __lowercase ( self : Any ):
'''simple docstring'''
_a : Union[str, Any] = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
_a : Union[str, Any] = self.backbone_config.to_dict()
_a : Tuple = self.__class__.model_type
return output
| 358 |
'''simple docstring'''
from functools import lru_cache
@lru_cache
def UpperCAmelCase_ (__a : int ):
"""simple docstring"""
if num < 0:
raise ValueError('Number should not be negative.' )
return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 5 | 0 |
'''simple docstring'''
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s""",
datefmt="""%m/%d/%Y %H:%M:%S""",
level=logging.INFO,
)
__lowerCAmelCase = logging.getLogger(__name__)
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
_a : Union[str, Any] = git.Repo(search_parent_directories=__a )
_a : Dict = {
"""repo_id""": str(__a ),
"""repo_sha""": str(repo.head.object.hexsha ),
"""repo_branch""": str(repo.active_branch ),
}
with open(os.path.join(__a , 'git_log.json' ) , 'w' ) as f:
json.dump(__a , __a , indent=4 )
def UpperCAmelCase_ (__a : Union[str, Any] ):
"""simple docstring"""
if params.n_gpu <= 0:
_a : int = 0
_a : List[Any] = -1
_a : Dict = True
_a : List[Any] = False
return
assert torch.cuda.is_available()
logger.info('Initializing GPUs' )
if params.n_gpu > 1:
assert params.local_rank != -1
_a : str = int(os.environ['WORLD_SIZE'] )
_a : Tuple = int(os.environ['N_GPU_NODE'] )
_a : Tuple = int(os.environ['RANK'] )
# number of nodes / node ID
_a : List[Any] = params.world_size // params.n_gpu_per_node
_a : Optional[Any] = params.global_rank // params.n_gpu_per_node
_a : Any = True
assert params.n_nodes == int(os.environ['N_NODES'] )
assert params.node_id == int(os.environ['NODE_RANK'] )
# local job (single GPU)
else:
assert params.local_rank == -1
_a : Optional[int] = 1
_a : Tuple = 0
_a : Tuple = 0
_a : List[str] = 0
_a : str = 1
_a : Any = 1
_a : str = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
_a : List[Any] = params.node_id == 0 and params.local_rank == 0
_a : Tuple = params.n_nodes > 1
# summary
_a : Tuple = f"""--- Global rank: {params.global_rank} - """
logger.info(PREFIX + 'Number of nodes: %i' % params.n_nodes )
logger.info(PREFIX + 'Node ID : %i' % params.node_id )
logger.info(PREFIX + 'Local rank : %i' % params.local_rank )
logger.info(PREFIX + 'World size : %i' % params.world_size )
logger.info(PREFIX + 'GPUs per node : %i' % params.n_gpu_per_node )
logger.info(PREFIX + 'Master : %s' % str(params.is_master ) )
logger.info(PREFIX + 'Multi-node : %s' % str(params.multi_node ) )
logger.info(PREFIX + 'Multi-GPU : %s' % str(params.multi_gpu ) )
logger.info(PREFIX + 'Hostname : %s' % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info('Initializing PyTorch distributed' )
torch.distributed.init_process_group(
init_method='env://' , backend='nccl' , )
def UpperCAmelCase_ (__a : int ):
"""simple docstring"""
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
| 359 |
'''simple docstring'''
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
__lowerCAmelCase = threading.Lock()
__lowerCAmelCase = None
__lowerCAmelCase = {
"""debug""": logging.DEBUG,
"""info""": logging.INFO,
"""warning""": logging.WARNING,
"""error""": logging.ERROR,
"""critical""": logging.CRITICAL,
}
__lowerCAmelCase = logging.WARNING
__lowerCAmelCase = True
def UpperCAmelCase_ ():
"""simple docstring"""
_a : Dict = os.getenv('TRANSFORMERS_VERBOSITY' , __a )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
f"""Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, """
f"""has to be one of: { ', '.join(log_levels.keys() ) }""" )
return _default_log_level
def UpperCAmelCase_ ():
"""simple docstring"""
return __name__.split('.' )[0]
def UpperCAmelCase_ ():
"""simple docstring"""
return logging.getLogger(_get_library_name() )
def UpperCAmelCase_ ():
"""simple docstring"""
global _default_handler
with _lock:
if _default_handler:
# This library has already configured the library root logger.
return
_a : str = logging.StreamHandler() # Set sys.stderr as stream.
_a : Optional[Any] = sys.stderr.flush
# Apply our default configuration to the library root logger.
_a : List[Any] = _get_library_root_logger()
library_root_logger.addHandler(_default_handler )
library_root_logger.setLevel(_get_default_logging_level() )
_a : List[str] = False
def UpperCAmelCase_ ():
"""simple docstring"""
global _default_handler
with _lock:
if not _default_handler:
return
_a : int = _get_library_root_logger()
library_root_logger.removeHandler(_default_handler )
library_root_logger.setLevel(logging.NOTSET )
_a : str = None
def UpperCAmelCase_ ():
"""simple docstring"""
return log_levels
def UpperCAmelCase_ (__a : Optional[str] = None ):
"""simple docstring"""
if name is None:
_a : List[Any] = _get_library_name()
_configure_library_root_logger()
return logging.getLogger(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
_configure_library_root_logger()
return _get_library_root_logger().getEffectiveLevel()
def UpperCAmelCase_ (__a : int ):
"""simple docstring"""
_configure_library_root_logger()
_get_library_root_logger().setLevel(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
return set_verbosity(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
return set_verbosity(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
return set_verbosity(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
return set_verbosity(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().removeHandler(_default_handler )
def UpperCAmelCase_ ():
"""simple docstring"""
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().addHandler(_default_handler )
def UpperCAmelCase_ (__a : logging.Handler ):
"""simple docstring"""
_configure_library_root_logger()
assert handler is not None
_get_library_root_logger().addHandler(__a )
def UpperCAmelCase_ (__a : logging.Handler ):
"""simple docstring"""
_configure_library_root_logger()
assert handler is not None and handler not in _get_library_root_logger().handlers
_get_library_root_logger().removeHandler(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
_configure_library_root_logger()
_a : Union[str, Any] = False
def UpperCAmelCase_ ():
"""simple docstring"""
_configure_library_root_logger()
_a : Dict = True
def UpperCAmelCase_ ():
"""simple docstring"""
_a : Any = _get_library_root_logger().handlers
for handler in handlers:
_a : Union[str, Any] = logging.Formatter('[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s' )
handler.setFormatter(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
_a : Union[str, Any] = _get_library_root_logger().handlers
for handler in handlers:
handler.setFormatter(__a )
def UpperCAmelCase_ (self : Union[str, Any] , *__a : Union[str, Any] , **__a : Union[str, Any] ):
"""simple docstring"""
_a : Union[str, Any] = os.getenv('TRANSFORMERS_NO_ADVISORY_WARNINGS' , __a )
if no_advisory_warnings:
return
self.warning(*__a , **__a )
__lowerCAmelCase = warning_advice
@functools.lru_cache(__a )
def UpperCAmelCase_ (self : int , *__a : Optional[Any] , **__a : Any ):
"""simple docstring"""
self.warning(*__a , **__a )
__lowerCAmelCase = warning_once
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : Any ,*_a : Tuple ,**_a : int ): # pylint: disable=unused-argument
'''simple docstring'''
_a : int = args[0] if args else None
def __iter__( self : str ):
'''simple docstring'''
return iter(self._iterator )
def __getattr__( self : List[Any] ,_a : int ):
'''simple docstring'''
def empty_fn(*_a : Optional[Any] ,**_a : Any ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self : List[str] ):
'''simple docstring'''
return self
def __exit__( self : List[str] ,_a : str ,_a : List[Any] ,_a : str ):
'''simple docstring'''
return
class UpperCAmelCase__ :
"""simple docstring"""
def __call__( self : Union[str, Any] ,*_a : Tuple ,**_a : Tuple ):
'''simple docstring'''
if _tqdm_active:
return tqdm_lib.tqdm(*_a ,**_a )
else:
return EmptyTqdm(*_a ,**_a )
def __lowercase ( self : str ,*_a : List[Any] ,**_a : Any ):
'''simple docstring'''
_a : Any = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*_a ,**_a )
def __lowercase ( self : List[str] ):
'''simple docstring'''
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
__lowerCAmelCase = _tqdm_cls()
def UpperCAmelCase_ ():
"""simple docstring"""
global _tqdm_active
return bool(_tqdm_active )
def UpperCAmelCase_ ():
"""simple docstring"""
global _tqdm_active
_a : str = True
hf_hub_utils.enable_progress_bars()
def UpperCAmelCase_ ():
"""simple docstring"""
global _tqdm_active
_a : Dict = False
hf_hub_utils.disable_progress_bars()
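# --- Illustrative usage (not part of the source) ------------------------------------------
# This module backs the public `transformers.logging` helpers; typical use:
#
#     import transformers
#     transformers.logging.set_verbosity_info()
#     logger = transformers.logging.get_logger('transformers')
#     logger.info('hello')
#
# or, equivalently, set TRANSFORMERS_VERBOSITY=info in the environment before import.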
| 5 | 0 |
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : Any = (DDPMScheduler,)
def __lowercase ( self : Optional[int] ,**_a : Optional[int] ):
'''simple docstring'''
_a : Optional[Any] = {
'num_train_timesteps': 1000,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'variance_type': 'fixed_small',
'clip_sample': True,
}
config.update(**lowerCamelCase_ )
return config
def __lowercase ( self : Dict ):
'''simple docstring'''
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=lowerCamelCase_ )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] ,[0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=lowerCamelCase_ ,beta_end=lowerCamelCase_ )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowerCamelCase_ )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=lowerCamelCase_ )
def __lowercase ( self : str ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowerCamelCase_ )
def __lowercase ( self : List[str] ):
'''simple docstring'''
self.check_over_configs(thresholding=lowerCamelCase_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=lowerCamelCase_ ,prediction_type=lowerCamelCase_ ,sample_max_value=lowerCamelCase_ ,)
def __lowercase ( self : Any ):
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase_ )
def __lowercase ( self : Any ):
'''simple docstring'''
for t in [0, 500, 999]:
self.check_over_forward(time_step=lowerCamelCase_ )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : Optional[int] = self.scheduler_classes[0]
_a : int = self.get_scheduler_config()
_a : int = scheduler_class(**lowerCamelCase_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5
def __lowercase ( self : Dict ):
'''simple docstring'''
_a : Tuple = self.scheduler_classes[0]
_a : Union[str, Any] = self.get_scheduler_config()
_a : Optional[Any] = scheduler_class(**lowerCamelCase_ )
_a : Dict = len(lowerCamelCase_ )
_a : Union[str, Any] = self.dummy_model()
_a : Optional[int] = self.dummy_sample_deter
_a : Tuple = torch.manual_seed(0 )
for t in reversed(range(lowerCamelCase_ ) ):
# 1. predict noise residual
_a : Tuple = model(lowerCamelCase_ ,lowerCamelCase_ )
# 2. predict previous mean of sample x_t-1
_a : Optional[int] = scheduler.step(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,generator=lowerCamelCase_ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_a : List[Any] = pred_prev_sample
_a : Optional[Any] = torch.sum(torch.abs(lowerCamelCase_ ) )
_a : Optional[int] = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_sum.item() - 258.9606 ) < 1E-2
assert abs(result_mean.item() - 0.3372 ) < 1E-3
def __lowercase ( self : Any ):
'''simple docstring'''
_a : List[Any] = self.scheduler_classes[0]
_a : List[Any] = self.get_scheduler_config(prediction_type='v_prediction' )
_a : Any = scheduler_class(**lowerCamelCase_ )
_a : int = len(lowerCamelCase_ )
_a : int = self.dummy_model()
_a : str = self.dummy_sample_deter
_a : Optional[int] = torch.manual_seed(0 )
for t in reversed(range(lowerCamelCase_ ) ):
# 1. predict noise residual
_a : Optional[Any] = model(lowerCamelCase_ ,lowerCamelCase_ )
# 2. predict previous mean of sample x_t-1
_a : List[Any] = scheduler.step(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,generator=lowerCamelCase_ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_a : Optional[Any] = pred_prev_sample
_a : Union[str, Any] = torch.sum(torch.abs(lowerCamelCase_ ) )
_a : Optional[Any] = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_sum.item() - 202.0296 ) < 1E-2
assert abs(result_mean.item() - 0.2631 ) < 1E-3
def __lowercase ( self : Tuple ):
'''simple docstring'''
_a : Optional[int] = self.scheduler_classes[0]
_a : Tuple = self.get_scheduler_config()
_a : Tuple = scheduler_class(**lowerCamelCase_ )
_a : List[Any] = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=lowerCamelCase_ )
_a : List[Any] = scheduler.timesteps
for i, timestep in enumerate(lowerCamelCase_ ):
if i == len(lowerCamelCase_ ) - 1:
_a : Union[str, Any] = -1
else:
_a : int = timesteps[i + 1]
_a : Any = scheduler.previous_timestep(lowerCamelCase_ )
_a : Union[str, Any] = prev_t.item()
self.assertEqual(lowerCamelCase_ ,lowerCamelCase_ )
def __lowercase ( self : Tuple ):
'''simple docstring'''
_a : List[str] = self.scheduler_classes[0]
_a : Optional[int] = self.get_scheduler_config()
_a : Tuple = scheduler_class(**lowerCamelCase_ )
_a : Dict = [100, 87, 50, 51, 0]
with self.assertRaises(lowerCamelCase_ ,msg='`custom_timesteps` must be in descending order.' ):
scheduler.set_timesteps(timesteps=lowerCamelCase_ )
def __lowercase ( self : Dict ):
'''simple docstring'''
_a : Dict = self.scheduler_classes[0]
_a : Union[str, Any] = self.get_scheduler_config()
_a : Union[str, Any] = scheduler_class(**lowerCamelCase_ )
_a : List[str] = [100, 87, 50, 1, 0]
_a : Dict = len(lowerCamelCase_ )
with self.assertRaises(lowerCamelCase_ ,msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ):
scheduler.set_timesteps(num_inference_steps=lowerCamelCase_ ,timesteps=lowerCamelCase_ )
def __lowercase ( self : int ):
'''simple docstring'''
_a : int = self.scheduler_classes[0]
_a : Optional[int] = self.get_scheduler_config()
_a : int = scheduler_class(**lowerCamelCase_ )
_a : Union[str, Any] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
        lowerCamelCase_ ,msg=f'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}' ,):
scheduler.set_timesteps(timesteps=lowerCamelCase_ )
| 360 |
'''simple docstring'''
def UpperCAmelCase_ (__a : list[int] , __a : list[int] ):
"""simple docstring"""
if not len(__a ) == len(__a ) == 3:
raise ValueError('Please enter a valid equation.' )
if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0:
raise ValueError('Both a & b of two equations can\'t be zero.' )
# Extract the coefficients
_a, _a, _a : Tuple = equationa
_a, _a, _a : str = equationa
# Calculate the determinants of the matrices
_a : Union[str, Any] = aa * ba - aa * ba
_a : List[Any] = ca * ba - ca * ba
_a : List[Any] = aa * ca - aa * ca
# Check if the system of linear equations has a solution (using Cramer's rule)
if determinant == 0:
if determinant_x == determinant_y == 0:
raise ValueError('Infinite solutions. (Consistent system)' )
else:
raise ValueError('No solution. (Inconsistent system)' )
else:
if determinant_x == determinant_y == 0:
# Trivial solution (Inconsistent system)
return (0.0, 0.0)
else:
_a : int = determinant_x / determinant
_a : List[str] = determinant_y / determinant
# Non-Trivial Solution (Consistent system)
return (x, y)
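# --- Worked example (not part of the source) -----------------------------------------
# Reading the obfuscated coefficients as a1, b1, c1 / a2, b2, c2, solving
# 2x + 3y = 7 and x - y = 1, i.e. UpperCAmelCase_([2, 3, 7], [1, -1, 1]):
#     determinant   = 2 * (-1) - 1 * 3 = -5
#     determinant_x = 7 * (-1) - 1 * 3 = -10  ->  x = -10 / -5 = 2.0
#     determinant_y = 2 * 1    - 1 * 7 = -5   ->  y =  -5 / -5 = 1.0
# giving (2.0, 1.0); check: 2*2 + 3*1 == 7 and 2 - 1 == 1.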
| 5 | 0 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase = logging.get_logger(__name__)
def rename_keys(state_dict ):
    """simple docstring"""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith('module.encoder' ):
            key = key.replace('module.encoder' , 'glpn.encoder' )
        if key.startswith('module.decoder' ):
            key = key.replace('module.decoder' , 'decoder.stages' )
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find('patch_embed' ) + len('patch_embed' )]
            key = key.replace(f"""patch_embed{idx}""" , f"""patch_embeddings.{int(idx )-1}""" )
        if "norm" in key:
            key = key.replace('norm' , 'layer_norm' )
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find('glpn.encoder.layer_norm' ) + len('glpn.encoder.layer_norm' )]
            key = key.replace(f"""layer_norm{idx}""" , f"""layer_norm.{int(idx )-1}""" )
        if "layer_norm1" in key:
            key = key.replace('layer_norm1' , 'layer_norm_1' )
        if "layer_norm2" in key:
            key = key.replace('layer_norm2' , 'layer_norm_2' )
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find('block' ) + len('block' )]
            key = key.replace(f"""block{idx}""" , f"""block.{int(idx )-1}""" )
        if "attn.q" in key:
            key = key.replace('attn.q' , 'attention.self.query' )
        if "attn.proj" in key:
            key = key.replace('attn.proj' , 'attention.output.dense' )
        if "attn" in key:
            key = key.replace('attn' , 'attention.self' )
        if "fc1" in key:
            key = key.replace('fc1' , 'dense1' )
        if "fc2" in key:
            key = key.replace('fc2' , 'dense2' )
        if "linear_pred" in key:
            key = key.replace('linear_pred' , 'classifier' )
        if "linear_fuse" in key:
            key = key.replace('linear_fuse.conv' , 'linear_fuse' )
            key = key.replace('linear_fuse.bn' , 'batch_norm' )
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find('linear_c' ) + len('linear_c' )]
            key = key.replace(f"""linear_c{idx}""" , f"""linear_c.{int(idx )-1}""" )
        if "bot_conv" in key:
            key = key.replace('bot_conv' , '0.convolution' )
        if "skip_conv1" in key:
            key = key.replace('skip_conv1' , '1.convolution' )
        if "skip_conv2" in key:
            key = key.replace('skip_conv2' , '2.convolution' )
        if "fusion1" in key:
            key = key.replace('fusion1' , '1.fusion' )
        if "fusion2" in key:
            key = key.replace('fusion2' , '2.fusion' )
        if "fusion3" in key:
            key = key.replace('fusion3' , '3.fusion' )
        if "fusion" in key and "conv" in key:
            key = key.replace('conv' , 'convolutional_layer' )
        if key.startswith('module.last_layer_depth' ):
            key = key.replace('module.last_layer_depth' , 'head.head' )
        new_state_dict[key] = value
    return new_state_dict
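# Illustrative check of the renaming above on a single key (not part of the
# original conversion script): the 1-based index in the checkpoint becomes the
# 0-based index the HF model expects.
assert list(rename_keys(OrderedDict({'module.encoder.patch_embed1.proj.weight': 0}) ) ) == [
    'glpn.encoder.patch_embeddings.0.proj.weight'
]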
def read_in_k_v(state_dict , config ):
    """simple docstring"""
    for i in range(config.num_encoder_blocks ):
        for j in range(config.depths[i] ):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.weight""" )
            kv_bias = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.bias""" )
            # next, add keys and values (in that order) to the state dict
            state_dict[f"""glpn.encoder.block.{i}.{j}.attention.self.key.weight"""] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"""glpn.encoder.block.{i}.{j}.attention.self.key.bias"""] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"""glpn.encoder.block.{i}.{j}.attention.self.value.weight"""] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"""glpn.encoder.block.{i}.{j}.attention.self.value.bias"""] = kv_bias[config.hidden_sizes[i] :]
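# A hedged toy of the split performed above (shapes are assumed for the sake of
# illustration): the checkpoint stores a fused [key; value] projection that is
# sliced into two equal halves along the output dimension.
_demo_kv = torch.randn(8 , 4 )  # fused projection for a hidden size of 4
assert _demo_kv[:4, :].shape == _demo_kv[4:, :].shape == (4, 4)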
def prepare_img():
    """simple docstring"""
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path , pytorch_dump_folder_path , push_to_hub=False , model_name=None ):
    """simple docstring"""
    config = GLPNConfig(hidden_sizes=[6_4, 1_2_8, 3_2_0, 5_1_2] , decoder_hidden_size=6_4 , depths=[3, 8, 2_7, 3] )
    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()
    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image , return_tensors='pt' ).pixel_values
    logger.info('Converting model...' )
    # load original state dict
    state_dict = torch.load(checkpoint_path , map_location=torch.device('cpu' ) )
    # rename keys
    state_dict = rename_keys(state_dict )
    # key and value matrices need special treatment
    read_in_k_v(state_dict , config )
    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config )
    model.load_state_dict(state_dict )
    model.eval()
    # forward pass
    outputs = model(pixel_values )
    predicted_depth = outputs.predicted_depth
    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]] )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] )
        else:
            raise ValueError(f"""Unknown model name: {model_name}""" )
        expected_shape = torch.Size([1, 4_8_0, 6_4_0] )
        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3] , expected_slice , atol=1e-4 )
        print('Looks ok!' )
    # finally, push to hub if required
    if push_to_hub:
        logger.info('Pushing model and image processor to the hub...' )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=True , )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=True , )
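# Hypothetical invocation of the converter above (script and path names are
# placeholders, not confirmed by this sample):
#   python convert_glpn_to_pytorch.py \
#       --checkpoint_path ./glpn_kitti.pth \
#       --pytorch_dump_folder_path ./glpn-kitti \
#       --model_name glpn-kitti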
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""",
default=None,
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
parser.add_argument(
"""--model_name""",
default="""glpn-kitti""",
type=str,
help="""Name of the model in case you\'re pushing to the hub.""",
)
args = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 361 |
'''simple docstring'''
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : int ,_a : List[str] ,_a : Optional[Any]=13 ,_a : str=30 ,_a : str=2 ,_a : Union[str, Any]=3 ,_a : Optional[Any]=True ,_a : int=True ,_a : Union[str, Any]=32 ,_a : List[Any]=5 ,_a : Union[str, Any]=4 ,_a : int=37 ,_a : Any="gelu" ,_a : Union[str, Any]=0.1 ,_a : str=0.1 ,_a : List[str]=10 ,_a : Dict=0.02 ,_a : Tuple=None ,):
'''simple docstring'''
_a : Any = parent
_a : int = batch_size
_a : List[Any] = image_size
_a : Optional[int] = patch_size
_a : List[str] = num_channels
_a : Dict = is_training
_a : Dict = use_labels
_a : Optional[Any] = hidden_size
_a : str = num_hidden_layers
_a : Optional[int] = num_attention_heads
_a : Dict = intermediate_size
_a : Union[str, Any] = hidden_act
_a : List[str] = hidden_dropout_prob
_a : Any = attention_probs_dropout_prob
_a : List[str] = type_sequence_label_size
_a : int = initializer_range
_a : List[Any] = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_a : Union[str, Any] = (image_size // patch_size) ** 2
_a : Tuple = num_patches + 1
def __lowercase ( self : Any ):
'''simple docstring'''
_a : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : str = None
if self.use_labels:
_a : Tuple = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_a : List[str] = self.get_config()
return config, pixel_values, labels
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
return ViTMSNConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,initializer_range=self.initializer_range ,)
def __lowercase ( self : Tuple ,_a : Any ,_a : List[Any] ,_a : int ):
'''simple docstring'''
_a : str = ViTMSNModel(config=_a )
model.to(_a )
model.eval()
_a : int = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def __lowercase ( self : List[Any] ,_a : str ,_a : Tuple ,_a : Dict ):
'''simple docstring'''
_a : Tuple = self.type_sequence_label_size
_a : int = ViTMSNForImageClassification(_a )
model.to(_a )
model.eval()
_a : Dict = model(_a ,labels=_a )
        print(f"""Pixel and labels shape: {pixel_values.shape}, {labels.shape}""" )
        print(f"""Labels: {labels}""" )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_a : int = 1
_a : Optional[Any] = ViTMSNForImageClassification(_a )
model.to(_a )
model.eval()
_a : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_a : Optional[int] = model(_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def __lowercase ( self : Any ):
'''simple docstring'''
_a : Optional[int] = self.prepare_config_and_inputs()
_a, _a, _a : int = config_and_inputs
_a : List[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Tuple = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
__UpperCAmelCase : List[Any] = (
{'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification}
if is_torch_available()
else {}
)
__UpperCAmelCase : str = False
__UpperCAmelCase : Optional[Any] = False
__UpperCAmelCase : List[str] = False
__UpperCAmelCase : int = False
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : List[str] = ViTMSNModelTester(self )
_a : Optional[int] = ConfigTester(self ,config_class=_a ,has_text_modality=_a ,hidden_size=37 )
def __lowercase ( self : str ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMSN does not use inputs_embeds' )
def __lowercase ( self : List[str] ):
'''simple docstring'''
pass
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a, _a : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : List[Any] = model_class(_a )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
_a : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a ,nn.Linear ) )
def __lowercase ( self : Any ):
'''simple docstring'''
_a, _a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : List[str] = model_class(_a )
_a : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : List[Any] = [*signature.parameters.keys()]
_a : int = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_a )
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def __lowercase ( self : int ):
'''simple docstring'''
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : Dict = ViTMSNModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def prepare_img():
    """simple docstring"""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained('facebook/vit-msn-small' ) if is_vision_available() else None
@slow
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(2 )
_a : List[str] = ViTMSNForImageClassification.from_pretrained('facebook/vit-msn-small' ).to(_a )
_a : List[str] = self.default_image_processor
_a : int = prepare_img()
_a : Tuple = image_processor(images=_a ,return_tensors='pt' ).to(_a )
# forward pass
with torch.no_grad():
_a : Optional[int] = model(**_a )
# verify the logits
_a : Union[str, Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,_a )
_a : List[Any] = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_a ,atol=1E-4 ) )
| 5 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
def shape_list(tensor : Union[tf.Tensor, np.ndarray] ):
    """simple docstring"""
    if isinstance(tensor , np.ndarray ):
        return list(tensor.shape )
    dynamic = tf.shape(tensor )
    if tensor.shape == tf.TensorShape(None ):
        return dynamic
    static = tensor.shape.as_list()
    return [dynamic[i] if s is None else s for i, s in enumerate(static )]
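# Illustrative check (not part of the original module): in eager mode every
# dimension of a concrete tensor is static, so only plain ints come back.
assert shape_list(tf.zeros((2, 3) ) ) == [2, 3]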
def UpperCAmelCase_ (logits : tf.Tensor , axis : Optional[int] = None , name : Optional[str] = None ):
    """simple docstring"""
    # The tiny additive constant works around wrong XLA-compiled softmax results
    # on CPU for some inputs; it is numerically negligible for float logits.
    return tf.nn.softmax(logits=logits + 1e-9 , axis=axis , name=name )
def UpperCAmelCase_ (inputs , weight , bias , epsilon=1e-5 , axis=-1 ):
    """simple docstring"""
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis , int ):
        raise NotImplementedError('Only 1D weight and bias tensors are supported for now, with only a single axis.' )
    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs , axes=[axis] , keepdims=True )
    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs )[axis]
        weight = tf.reshape(weight , shape )
        bias = tf.reshape(bias , shape )
    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs , mean , variance , offset=bias , scale=weight , variance_epsilon=epsilon , )
    return outputs
def UpperCAmelCase_ (input , start_dim=0 , end_dim=-1 ):
    """simple docstring"""
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank
    if start_dim == end_dim:
        return input
    in_shape = tf.shape(input )
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
    return tf.reshape(input , out_shape )
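# A hedged standalone sketch of the flatten semantics above (mirrors
# torch.flatten): the dimensions in [start_dim, end_dim] collapse into one.
_demo = tf.zeros((2, 3, 4, 5) )
_demo_shape = tf.shape(_demo )
_merged = tf.math.reduce_prod(_demo_shape[1:3] )  # 3 * 4 = 12
_flat = tf.reshape(_demo , tf.concat([_demo_shape[:1], [_merged], _demo_shape[3:]] , axis=0 ) )
assert _flat.shape == (2, 12, 5)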
def UpperCAmelCase_ (encoder_attention_mask : tf.Tensor ):
    """simple docstring"""
    if not isinstance(encoder_attention_mask , tf.Tensor ):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask ) # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min
    return encoder_extended_attention_mask
def UpperCAmelCase_ (tensor : tf.Tensor , embed_dim : int , tensor_name : str = "input_ids" ):
    """simple docstring"""
    tf.debugging.assert_less(
        tensor , tf.cast(embed_dim , dtype=tensor.dtype ) , message=(
            f"""The maximum value of {tensor_name} ({tf.math.reduce_max(tensor )}) must be smaller than the embedding """
            f"""layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."""
        ) , )
def UpperCAmelCase_ (group , name , data ):
    """simple docstring"""
    HDF5_OBJECT_HEADER_LIMIT = 6_4_5_1_2
    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x ) > HDF5_OBJECT_HEADER_LIMIT]
    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            'The following attributes cannot be saved to HDF5 file because '
            f"""they are larger than {HDF5_OBJECT_HEADER_LIMIT} """
            f"""bytes: {bad_attributes}""" )
    data_npy = np.asarray(data )
    num_chunks = 1
    chunked_data = np.array_split(data_npy , num_chunks )
    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
        num_chunks += 1
        chunked_data = np.array_split(data_npy , num_chunks )
    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data ):
            group.attrs['%s%d' % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data
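# Illustrative check of the chunking strategy above (not part of the original
# module): np.array_split keeps the pieces as even as possible.
assert [c.size for c in np.array_split(np.arange(10 ) , 3 )] == [4, 3, 3]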
def UpperCAmelCase_ (group , name ):
    """simple docstring"""
    if name in group.attrs:
        data = [n.decode('utf8' ) if hasattr(n , 'decode' ) else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode('utf8' ) if hasattr(n , 'decode' ) else n for n in group.attrs['%s%d' % (name, chunk_id)]] )
            chunk_id += 1
    return data
def UpperCAmelCase_ (data ):
    """simple docstring"""
    def _expand_single_ad_tensor(t ):
        if isinstance(t , tf.Tensor ) and t.shape.rank == 1:
            return tf.expand_dims(t , axis=-1 )
        return t
    return tf.nest.map_structure(_expand_single_ad_tensor , data )
| 362 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def world_covidaa_stats(__a : str = "https://www.worldometers.info/coronavirus" ):
"""simple docstring"""
_a : List[str] = BeautifulSoup(requests.get(__a ).text , 'html.parser' )
_a : Dict = soup.findAll('h1' )
_a : Union[str, Any] = soup.findAll('div' , {'class': 'maincounter-number'} )
keys += soup.findAll('span' , {'class': 'panel-title'} )
values += soup.findAll('div' , {'class': 'number-table-main'} )
return {key.text.strip(): value.text.strip() for key, value in zip(__a , __a )}
if __name__ == "__main__":
print("""\033[1m""" + """COVID-19 Status of the World""" + """\033[0m\n""")
for key, value in world_covidaa_stats().items():
print(f'''{key}\n{value}\n''')
| 5 | 0 |
'''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
logger = logging.getLogger(__name__)
def save_model(model , dirpath ):
    """simple docstring"""
    if os.path.exists(dirpath ):
        if os.path.exists(os.path.join(dirpath , 'config.json' ) ) and os.path.isfile(
            os.path.join(dirpath , 'config.json' ) ):
            os.remove(os.path.join(dirpath , 'config.json' ) )
        if os.path.exists(os.path.join(dirpath , 'pytorch_model.bin' ) ) and os.path.isfile(
            os.path.join(dirpath , 'pytorch_model.bin' ) ):
            os.remove(os.path.join(dirpath , 'pytorch_model.bin' ) )
    else:
        os.makedirs(dirpath )
    model.save_pretrained(dirpath )
def entropy(p , unlogit=False ):
    """simple docstring"""
    exponent = 2
    if unlogit:
        p = torch.pow(p , exponent )
    plogp = p * torch.log(p )
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1 )
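# Illustrative check (not part of the original script): a uniform 2-way
# distribution has entropy ln 2 ~= 0.6931, a one-hot distribution has entropy 0.
assert torch.allclose(entropy(torch.tensor([[0.5, 0.5], [1.0, 0.0]] ) ) , torch.tensor([0.6931, 0.0] ) , atol=1e-4 )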
def print_ad_tensor(tensor ):
    """simple docstring"""
    logger.info('lv, h >\t' + '\t'.join(f"""{x + 1}""" for x in range(len(tensor ) ) ) )
    for row in range(len(tensor ) ):
        if tensor.dtype != torch.long:
            logger.info(f"""layer {row + 1}:\t""" + '\t'.join(f"""{x:.5f}""" for x in tensor[row].cpu().data ) )
        else:
            logger.info(f"""layer {row + 1}:\t""" + '\t'.join(f"""{x:d}""" for x in tensor[row].cpu().data ) )
def compute_heads_importance(args , model , eval_dataloader , compute_entropy=True , compute_importance=True , head_mask=None , actually_pruned=False ):
    """simple docstring"""
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers , n_heads ).to(args.device )
    attn_entropy = torch.zeros(n_layers , n_heads ).to(args.device )
    if head_mask is None:
        head_mask = torch.ones(n_layers , n_heads ).to(args.device )
    head_mask.requires_grad_(requires_grad=True )
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None
    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader , desc='Iteration' , disable=args.local_rank not in [-1, 0] ) ):
        inputs = tuple(t.to(args.device ) for t in inputs )
        (input_ids, ) = inputs
        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids , labels=input_ids , head_mask=head_mask )
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions ):
                masked_entropy = entropy(attn.detach() , True )
                attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids ).float().detach().sum().data
    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance , exponent ).sum(-1 ) , 1 / exponent )
        head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20
    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
    # Print matrices
    if compute_entropy:
        logger.info('Attention entropies' )
        print_ad_tensor(attn_entropy )
    if compute_importance:
        logger.info('Head importance scores' )
        print_ad_tensor(head_importance )
    logger.info('Head ranked by importance scores' )
    head_ranks = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
    head_ranks[head_importance.view(-1 ).sort(descending=True )[1]] = torch.arange(
        head_importance.numel() , device=args.device )
    head_ranks = head_ranks.view_as(head_importance )
    print_ad_tensor(head_ranks )
    return attn_entropy, head_importance, total_loss
def mask_heads(args , model , eval_dataloader ):
    """simple docstring"""
    _, head_importance, loss = compute_heads_importance(args , model , eval_dataloader , compute_entropy=False )
    original_score = 1 / loss  # instead of downstream score use the LM loss
    logger.info('Pruning: original score: %f, threshold: %f' , original_score , original_score * args.masking_threshold )
    new_head_mask = torch.ones_like(head_importance )
    num_to_mask = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float('Inf' )
        current_heads_to_mask = head_importance.view(-1 ).sort()[1]
        if len(current_heads_to_mask ) <= num_to_mask:
            print('BREAK BY num_to_mask' )
            break
        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info('Heads to mask: %s' , str(current_heads_to_mask.tolist() ) )
        new_head_mask = new_head_mask.view(-1 )
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask )
        new_head_mask = new_head_mask.clone().detach()
        print_ad_tensor(new_head_mask )
        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args , model , eval_dataloader , compute_entropy=False , head_mask=new_head_mask )
        current_score = 1 / loss
        logger.info(
            'Masking: current score: %f, remaining heads %d (%.1f percents)' , current_score , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 1_0_0 , )
    logger.info('Final head mask' )
    print_ad_tensor(head_mask )
    np.save(os.path.join(args.output_dir , 'head_mask.npy' ) , head_mask.detach().cpu().numpy() )
    return head_mask
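# A hedged toy of the head-selection step inside the loop above (hypothetical
# values): the lowest-importance heads are zeroed first.
_demo_importance = torch.tensor([0.9, 0.1, 0.5, 0.05] )
_demo_mask = torch.ones_like(_demo_importance )
_demo_mask[_demo_importance.sort()[1][:2]] = 0.0  # mask the two least-important heads
assert _demo_mask.tolist() == [1.0, 0.0, 1.0, 0.0]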
def prune_heads(args , model , eval_dataloader , head_mask ):
    """simple docstring"""
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args , model , eval_dataloader , compute_entropy=False , compute_importance=False , head_mask=head_mask )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time
    original_num_params = sum(p.numel() for p in model.parameters() )
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask ) )
    }
    for k, v in heads_to_prune.items():
        if isinstance(v , int ):
            heads_to_prune[k] = [
                v,
            ]
    assert sum(len(h ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune )
    pruned_num_params = sum(p.numel() for p in model.parameters() )
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args , model , eval_dataloader , compute_entropy=False , compute_importance=False , head_mask=None , actually_pruned=True , )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time
    logger.info(
        'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)' , original_num_params , pruned_num_params , pruned_num_params / original_num_params * 1_0_0 , )
    logger.info('Pruning: score with masking: %f score with pruning: %f' , score_masking , score_pruning )
    logger.info('Pruning: speed ratio (original timing / new timing): %f percents' , original_time / new_time * 1_0_0 )
    save_model(model , args.output_dir )
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--data_dir' , default=None , type=str , required=True , help='The input data dir. Should contain the .tsv files (or other data files) for the task.' , )
    parser.add_argument(
        '--model_name_or_path' , default=None , type=str , required=True , help='Path to pretrained model or model identifier from huggingface.co/models' , )
    parser.add_argument(
        '--output_dir' , default=None , type=str , required=True , help='The output directory where the model predictions and checkpoints will be written.' , )
    # Other parameters
    parser.add_argument(
        '--config_name' , default='' , type=str , help='Pretrained config name or path if not the same as model_name_or_path' , )
    parser.add_argument(
        '--tokenizer_name' , default='' , type=str , help='Pretrained tokenizer name or path if not the same as model_name_or_path' , )
    parser.add_argument(
        '--cache_dir' , default=None , type=str , help='Where do you want to store the pre-trained models downloaded from s3' , )
    parser.add_argument(
        '--data_subset' , type=int , default=-1 , help='If > 0: limit the data to a subset of data_subset instances.' )
    parser.add_argument(
        '--overwrite_output_dir' , action='store_true' , help='Whether to overwrite data in output directory' )
    parser.add_argument(
        '--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
    parser.add_argument(
        '--dont_normalize_importance_by_layer' , action='store_true' , help='Don\'t normalize importance score by layers' )
    parser.add_argument(
        '--dont_normalize_global_importance' , action='store_true' , help='Don\'t normalize all importance scores between 0 and 1' , )
    parser.add_argument(
        '--try_masking' , action='store_true' , help='Whether to try to mask head until a threshold of accuracy.' )
    parser.add_argument(
        '--masking_threshold' , default=0.9 , type=float , help='masking threshold in term of metrics (stop masking when metric < threshold * original metric value).' , )
    parser.add_argument(
        '--masking_amount' , default=0.1 , type=float , help='Amount to heads to masking at each masking step.' )
    parser.add_argument('--metric_name' , default='acc' , type=str , help='Metric to use for head masking.' )
    parser.add_argument(
        '--max_seq_length' , default=1_2_8 , type=int , help=(
            'The maximum total input sequence length after WordPiece tokenization. \n'
            'Sequences longer than this will be truncated, sequences shorter padded.'
        ) , )
    parser.add_argument('--batch_size' , default=1 , type=int , help='Batch size.' )
    parser.add_argument('--seed' , type=int , default=4_2 )
    parser.add_argument('--local_rank' , type=int , default=-1 , help='local_rank for distributed training on gpus' )
    parser.add_argument('--no_cuda' , action='store_true' , help='Whether not to use CUDA when available' )
    parser.add_argument('--server_ip' , type=str , default='' , help='Can be used for distant debugging.' )
    parser.add_argument('--server_port' , type=str , default='' , help='Can be used for distant debugging.' )
    args = parser.parse_args()
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print('Waiting for debugger attach' )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=True )
        ptvsd.wait_for_attach()
    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu' )
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank )
        args.device = torch.device('cuda' , args.local_rank )
        args.n_gpu = 1
        torch.distributed.init_process_group(backend='nccl' )  # Initializes the distributed backend
    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
    logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
    model = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
    # Distributed and parallel training
    model.to(args.device )
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=True )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model )
    # Print/save training arguments
    os.makedirs(args.output_dir , exist_ok=True )
    torch.save(args , os.path.join(args.output_dir , 'run_args.bin' ) )
    logger.info('Training/evaluation parameters %s' , args )
    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir , dtype=np.int64 ),
        ] )
    train_tensor_dataset = (torch.from_numpy(numpy_data ),)
    train_data = TensorDataset(*train_tensor_dataset )
    train_sampler = RandomSampler(train_data )
    eval_dataloader = DataLoader(train_data , sampler=train_sampler , batch_size=args.batch_size )
    # Compute head entropy and importance score
    compute_heads_importance(args , model , eval_dataloader )
    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args , model , eval_dataloader )
        prune_heads(args , model , eval_dataloader , head_mask )
if __name__ == "__main__":
main()
| 363 |
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
__lowerCAmelCase = """docs/source/en/_toctree.yml"""
def clean_model_doc_toc(model_doc ):
    """simple docstring"""
    counts = defaultdict(int )
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]
    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc['title'] for doc in model_doc if doc['local'] == duplicate_key} )
        if len(titles ) > 1:
            raise ValueError(
                f"""{duplicate_key} is present several times in the documentation table of content at """
                '`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
                'others.' )
        # Only add this once
        new_doc.append({'local': duplicate_key, 'title': titles[0]} )
    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc['local']] == 1] )
    # Sort
    return sorted(new_doc , key=lambda s: s["title"].lower() )
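# Illustrative check (not part of the original utility): duplicate `local`
# entries collapse and the result is sorted by title.
assert clean_model_doc_toc(
    [
        {'local': 'model_doc/bert', 'title': 'BERT'},
        {'local': 'model_doc/albert', 'title': 'ALBERT'},
        {'local': 'model_doc/bert', 'title': 'BERT'},
    ] ) == [{'local': 'model_doc/albert', 'title': 'ALBERT'}, {'local': 'model_doc/bert', 'title': 'BERT'}]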
def check_model_doc(overwrite=False ):
    """simple docstring"""
    with open(__lowerCAmelCase , encoding='utf-8' ) as f:
        content = yaml.safe_load(f.read() )
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['sections']
    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]['sections']
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc ) if 'sections' in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc['sections']
        new_modality_doc = clean_model_doc_toc(old_modality_doc )
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                modality_doc['sections'] = new_modality_doc
    if diff:
        if overwrite:
            api_doc[model_idx]['sections'] = model_doc
            content[api_idx]['sections'] = api_doc
            with open(__lowerCAmelCase , 'w' , encoding='utf-8' ) as f:
                f.write(yaml.dump(content , allow_unicode=True ) )
        else:
            raise ValueError(
                'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
parser = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
args = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 5 | 0 |