| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (length 87 - 55.2k) | int64 (0 - 349) | string (length 135 - 49.1k) | int64 (0 - 349) | int64 (0 - 1) |
"""Transfo-XL configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}


class TransfoXLConfig(PretrainedConfig):
    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
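# A quick sanity check for the configuration above: a minimal sketch that assumes
# a transformers version which still ships Transfo-XL under the top-level namespace.
from transformers import TransfoXLConfig

config = TransfoXLConfig(d_model=512, n_layer=6)
print(config.hidden_size)              # 512, resolved through the attribute_map alias
print(config.max_position_embeddings)  # -1, i.e. no fixed sequence length limit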
from __future__ import annotations


def min_path_sum(matrix: list[list[int]]) -> int:
    """
    Minimum cost of a path from the top-left to the bottom-right cell of a
    grid, moving only right or down. The costs are folded into `matrix`.

    >>> min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]])
    7
    """
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]

    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]

    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])

    return matrix[-1][-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
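# Usage sketch for the helper above: min_path_sum folds costs into the matrix
# in place, so pass a copy when the original grid must be preserved.
grid = [[1, 3, 1], [1, 5, 1], [4, 2, 1]]
print(min_path_sum([row[:] for row in grid]))  # 7
print(grid[0])  # [1, 3, 1]: untouched, because each row was copied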
"""simple docstring"""
import qiskit
def _A ( UpperCamelCase_ : int = 2) -> qiskit.result.counts.Counts:
'''simple docstring'''
__lowercase = qubits
# Using Aer's simulator
__lowercase = qiskit.Aer.get_backend("aer_simulator")
# Creating a Quantum Circuit acting on the q register
__lowercase = qiskit.QuantumCircuit(UpperCamelCase_, UpperCamelCase_)
# Adding a H gate on qubit 0 (now q0 in superposition)
circuit.h(0)
for i in range(1, UpperCamelCase_):
# Adding CX (CNOT) gate
circuit.cx(i - 1, UpperCamelCase_)
# Mapping the quantum measurement to the classical bits
circuit.measure(list(range(UpperCamelCase_)), list(range(UpperCamelCase_)))
# Now measuring any one qubit would affect other qubits to collapse
# their super position and have same state as the measured one.
# Executing the circuit on the simulator
__lowercase = qiskit.execute(UpperCamelCase_, UpperCamelCase_, shots=1000)
return job.result().get_counts(UpperCamelCase_)
if __name__ == "__main__":
print(F"Total count for various states are: {quantum_entanglement(3)}")
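# Reading the result, as a sketch (assumes qiskit < 1.0 with Aer installed,
# matching the qiskit.Aer / qiskit.execute API used above). For a GHZ-style
# state only the fully correlated bitstrings should appear across the shots.
counts = quantum_entanglement(3)
assert set(counts) <= {"000", "111"}
print(counts)  # e.g. {'000': 512, '111': 488} out of 1000 shots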
import enum
import os
from hashlib import sha256
from typing import Optional

from .. import config
from .logging import get_logger


logger = get_logger(__name__)


class VerificationMode(enum.Enum):
    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    """Exceptions during checksum verification of downloaded files."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """Some downloaded files were not expected."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some files were supposed to be downloaded but were not."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """The downloaded file checksum doesn't match the expected checksum."""


def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None) -> None:
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)


class SplitsVerificationException(Exception):
    """Exceptions during split verification."""


class UnexpectedSplits(SplitsVerificationException):
    """The expected splits of the downloaded file are missing."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some recorded splits are missing."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """The splits sizes don't match the expected splits sizes."""


def verify_splits(expected_splits: Optional[dict], recorded_splits: dict) -> None:
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")


def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    """Compute the file size and, optionally, the sha256 checksum of a file."""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


def is_small_dataset(dataset_size: Optional[int]) -> bool:
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
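# Minimal usage sketch with toy data, assuming the helpers above are in scope.
# The per-URL records are compared as whole values, so any field mismatch raises.
expected = {"https://example.com/a.txt": {"checksum": "abc", "num_bytes": 3}}
recorded = {"https://example.com/a.txt": {"checksum": "xyz", "num_bytes": 3}}
try:
    verify_checksums(expected, recorded, verification_name="dataset infos")
except NonMatchingChecksumError as err:
    print(err)  # lists the URLs whose recorded checksums disagree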
import inspect
import unittest

from transformers import MobileNetV1Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MobileNetV1ForImageClassification, MobileNetV1Model
    from transformers.models.mobilenet_v1.modeling_mobilenet_v1 import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import MobileNetV1ImageProcessor


class MobileNetV1ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))


class MobileNetV1ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        min_depth=8,
        tf_padding=True,
        last_hidden_size=1024,
        output_stride=32,
        hidden_act="relu6",
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        self.last_hidden_size = int(last_hidden_size * depth_multiplier)
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetV1Config(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            min_depth=self.min_depth,
            tf_padding=self.tf_padding,
            hidden_act=self.hidden_act,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetV1Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV1ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class MobileNetV1ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MobileNetV1Model, MobileNetV1ForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MobileNetV1Model, "image-classification": MobileNetV1ForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetV1ModelTester(self)
        self.config_tester = MobileNetV1ConfigTester(self, config_class=MobileNetV1Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV1 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 26
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetV1Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileNetV1ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetV1ImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-4.1739, -1.1233, 3.1205]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
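# Sketch: running a single fast check from the suite above. The dotted test path
# is an assumption about the repository layout, not something this file defines.
import unittest

suite = unittest.TestLoader().loadTestsFromName(
    "tests.models.mobilenet_v1.test_modeling_mobilenet_v1.MobileNetV1ModelTest.test_model"
)
unittest.TextTestRunner(verbosity=2).run(suite)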
import math


def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
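# Quick checks for the helpers above (names as reconstructed):
assert is_prime(13) and not is_prime(21)
print(next_prime(13))             # 17: the next prime strictly after 13
print(next_prime(14, desc=True))  # 13: the `desc` flag searches downward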
import argparse

import torch

from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
    ["attention", "attn"],
    ["encoder_attention", "encoder_attn"],
    ["q_lin", "q_proj"],
    ["k_lin", "k_proj"],
    ["v_lin", "v_proj"],
    ["out_lin", "out_proj"],
    ["norm_embeddings", "layernorm_embedding"],
    ["position_embeddings", "embed_positions"],
    ["embeddings", "embed_tokens"],
    ["ffn.lin", "fc"],
]


def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k


def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]


@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue

        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
    parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
    parser.add_argument(
        "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
    )
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
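# Hypothetical programmatic invocation of the converter above; the checkpoint
# and config paths are placeholders, not files shipped with this script.
convert_parlai_checkpoint(
    checkpoint_path="blenderbot-model.bin",
    pytorch_dump_folder_path="hf_blenderbot",
    config_json_path="blenderbot-3b-config.json",
)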
import shutil
import tempfile
import unittest

import numpy as np

from transformers.testing_utils import (
    is_pt_tf_cross_test,
    require_tf,
    require_torch,
    require_torchvision,
    require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import AutoProcessor, SamImageProcessor, SamProcessor

if is_torch_available():
    import torch

if is_tf_available():
    import tensorflow as tf


@require_vision
@require_torchvision
class SamProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_torch
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = [torch.ones((1, 3, 5, 5))]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size)
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks, torch.tensor(original_sizes), torch.tensor(reshaped_input_size)
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))

        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(ValueError):
            masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))


@require_vision
@require_tf
class TFSamProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_tf
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = [tf.ones((1, 3, 5, 5))]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf")
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks,
            tf.convert_to_tensor(original_sizes),
            tf.convert_to_tensor(reshaped_input_size),
            return_tensors="tf",
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(
            dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError):
            masks = processor.post_process_masks(
                dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
            )


@require_vision
@require_torchvision
class SamProcessorEquivalenceTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    @is_pt_tf_cross_test
    def test_post_process_masks_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = np.random.randint(0, 2, size=(1, 3, 5, 5)).astype(np.float32)
        tf_dummy_masks = [tf.convert_to_tensor(dummy_masks)]
        pt_dummy_masks = [torch.tensor(dummy_masks)]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        tf_masks = processor.post_process_masks(
            tf_dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf"
        )
        pt_masks = processor.post_process_masks(
            pt_dummy_masks, original_sizes, reshaped_input_size, return_tensors="pt"
        )

        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy()))

    @is_pt_tf_cross_test
    def test_image_processor_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        pt_input_feat_extract = image_processor(image_input, return_tensors="pt")["pixel_values"].numpy()
        pt_input_processor = processor(images=image_input, return_tensors="pt")["pixel_values"].numpy()

        tf_input_feat_extract = image_processor(image_input, return_tensors="tf")["pixel_values"].numpy()
        tf_input_processor = processor(images=image_input, return_tensors="tf")["pixel_values"].numpy()

        self.assertTrue(np.allclose(pt_input_feat_extract, pt_input_processor))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_feat_extract))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_processor))
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile

from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available


if is_sentencepiece_available():
    from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"

if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"


@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["target_spm"])

        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)

    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected, batch.input_ids[0])

        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)
        MarianTokenizer.from_pretrained(save_dir)

    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()

        batch = tok(
            ["I am a small frog" * 1000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[43495, 462, 20, 42164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 38999, 6, 8, 464, 132, 1703, 492, 13, 4669, 37867, 13, 7525, 27, 1593, 988, 13, 33972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 12338, 2, 13958, 387, 2, 3629, 6953, 188, 2900, 2, 13958, 8011, 11501, 23, 8460, 4073, 34009, 20, 435, 11439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 37867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 26453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10767, 6, 316, 304, 4239, 3, 0], [148, 15722, 19, 1839, 12, 1350, 13, 22327, 5082, 5418, 47567, 35938, 59, 318, 19552, 108, 2183, 54, 14976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 19088, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100], [36, 6395, 12570, 39147, 11597, 6, 266, 4, 45405, 7296, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="Helsinki-NLP/opus-mt-en-de",
            revision="1a8c2263da11e68e50938f97e10cd57820bd504c",
            decode_kwargs={"use_source_tokenizer": True},
        )

    def test_tokenizer_integration_separate_vocabs(self):
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")

        source_text = "Tämä on testi"
        target_text = "This is a test"

        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]

        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(src_ids, expected_src_ids)

        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(target_ids, expected_target_ids)

        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
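# Sketch of what the _LazyModule indirection buys (assumes transformers and
# sentencepiece are installed): importing the package itself is cheap, and the
# heavy tokenizer module is only loaded on first attribute access.
import importlib

bartpho = importlib.import_module("transformers.models.bartpho")
tokenizer_cls = getattr(bartpho, "BartphoTokenizer")  # triggers the lazy import
print(tokenizer_cls.__name__)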
from typing import List, Optional, Union

import numpy as np
import PIL.Image

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
    ChannelDimension,
    PILImageResampling,
    get_image_size,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class GLPNImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size_divisor: int = 32,
        resample=PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        **kwargs,
    ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(
        self,
        image: np.ndarray,
        size_divisor: int,
        resample,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so that height and width are rounded down to a multiple of size_divisor."""
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(
        self, image: np.ndarray, scale: float, data_format: Optional[ChannelDimension] = None, **kwargs
    ) -> np.ndarray:
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]],
        do_resize: Optional[bool] = None,
        size_divisor: Optional[int] = None,
        resample=None,
        do_rescale: Optional[bool] = None,
        return_tensors: Optional[Union[TensorType, str]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError("Invalid image(s)")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
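# Usage sketch, assuming the class above is importable from transformers (a
# random RGB image stands in for real data): heights and widths are rounded
# down to multiples of size_divisor.
import numpy as np
import PIL.Image
from transformers import GLPNImageProcessor

processor = GLPNImageProcessor(size_divisor=32)
image = PIL.Image.fromarray(np.random.randint(0, 256, (70, 100, 3), dtype=np.uint8))
batch = processor.preprocess(image, return_tensors="np")
print(batch["pixel_values"][0].shape)  # (3, 64, 96)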
from __future__ import annotations

import requests

valid_terms = set(
    """approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports""".split()
)


def get_subreddit_data(
    subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None
) -> dict:
    """
    subreddit : Subreddit to query
    limit : Number of posts to fetch
    age : ["new", "top", "hot"]
    wanted_data : Get only the required data in the list
    """
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError

    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}

    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict


if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited. Try after some time.
    print(get_subreddit_data("learnpython", wanted_data=["title", "url", "selftext"]))
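# Hedged sketch: a simple retry wrapper around the function above, since
# Reddit's public JSON endpoint rate-limits unauthenticated clients (HTTP 429).
import time


def get_subreddit_data_with_retry(subreddit: str, attempts: int = 3) -> dict:
    for attempt in range(attempts):
        try:
            return get_subreddit_data(subreddit, wanted_data=["title", "url"])
        except requests.HTTPError:
            time.sleep(2 ** attempt)  # back off 1s, 2s, 4s
    raise RuntimeError(f"still rate limited after {attempts} attempts")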
import argparse
import json
import os

import fairseq
import torch
from torch import nn

from transformers import (
    Speech2Text2Config,
    Speech2Text2ForCausalLM,
    Speech2Text2Tokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

    return proj_weight


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict


@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    encoder_config = Wav2Vec2Config.from_pretrained(encoder_config_path)
    decoder_config = Speech2Text2Config.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    projection_layer = recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    hf_decoder = Speech2Text2ForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    # add projection layer
    hf_wav2vec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wav2vec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = Speech2Text2Tokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument(
        "--encoder_config_path",
        default="facebook/wav2vec2-large-lv60",
        type=str,
        help="Path to hf encoder wav2vec2 checkpoint config",
    )
    parser.add_argument(
        "--decoder_config_path",
        default="facebook/s2t-small-mustc-en-fr-st",
        type=str,
        help="Path to hf decoder s2t checkpoint config",
    )
    parser.add_argument("--vocab_size", default=10224, type=int, help="Vocab size of decoder")
    parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        vocab_size=args.vocab_size,
        num_decoder_layers=args.num_decoder_layers,
    )
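# Sketch: loading the converted checkpoint back for inference. The output
# directory below is a placeholder produced by a previous run of the script above.
from transformers import Speech2Text2Tokenizer, SpeechEncoderDecoderModel, Wav2Vec2FeatureExtractor

model = SpeechEncoderDecoderModel.from_pretrained("./s2t_wav2vec2")
tokenizer = Speech2Text2Tokenizer.from_pretrained("./s2t_wav2vec2")
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("./s2t_wav2vec2")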
"""simple docstring"""
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=50, initializer_range=0.02, use_labels=True, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.get_config()
return config, input_ids, input_mask, token_labels
    def get_config(self):
        return BertGenerationConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, is_decoder=False, initializer_range=self.initializer_range)
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model(self, config, input_ids, input_mask, token_labels, **kwargs):
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs):
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs):
        config.is_decoder = True
        config.add_cross_attention = True
        model = BertGenerationDecoder(config=config).to(torch_device).eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and attention mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels, *args):
        model = BertGenerationDecoder(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )

    def setUp(self):
        self.model_tester = BertGenerationEncoderTester(self)
        self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_bert(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_model_as_decoder_with_default_input_mask(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        self.assertIsNotNone(model)
@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 1024])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 50358])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
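# Hedged quick-start sketch (not part of the test suite above; assumes network
# access to the Hugging Face Hub). BertGeneration encoder/decoder weights are
# usually combined through EncoderDecoderModel:
#
#   from transformers import BertGenerationTokenizer, EncoderDecoderModel
#
#   tokenizer = BertGenerationTokenizer.from_pretrained(
#       "google/bert_for_seq_generation_L-24_bbc_encoder"
#   )
#   model = EncoderDecoderModel.from_encoder_decoder_pretrained(
#       "google/bert_for_seq_generation_L-24_bbc_encoder",
#       "google/bert_for_seq_generation_L-24_bbc_encoder",
#   )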
| 289 |
def binomial_coefficient(n: int, r: int) -> int:
    c = [0 for i in range(r + 1)]
    # nC0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
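# Minimal sanity check (illustrative; math.comb requires Python 3.8+):
import math

assert binomial_coefficient(n=10, r=5) == math.comb(10, 5) == 252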
| 325 | 0 |
def logical_left_shift(number: int, shift_amount: int) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift(number: int, shift_amount: int) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )

    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
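# Illustrative usage (not in the original file):
#   >>> logical_left_shift(17, 2)
#   '0b1000100'
#   >>> logical_right_shift(17, 2)
#   '0b100'
#   >>> arithmetic_right_shift(-17, 2)
#   '0b111011'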
| 313 |
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Union[str, Any] = ["vqvae"]
def __init__( self : int , _UpperCAmelCase : AutoencoderKL , _UpperCAmelCase : UNetaDConditionModel , _UpperCAmelCase : Mel , _UpperCAmelCase : Union[DDIMScheduler, DDPMScheduler] , ) -> str:
"""simple docstring"""
super().__init__()
self.register_modules(unet=_UpperCAmelCase , scheduler=_UpperCAmelCase , mel=_UpperCAmelCase , vqvae=_UpperCAmelCase )
def a__ ( self : Tuple ) -> int:
"""simple docstring"""
return 50 if isinstance(self.scheduler , _UpperCAmelCase ) else 10_00
@torch.no_grad()
def __call__( self : str , _UpperCAmelCase : int = 1 , _UpperCAmelCase : str = None , _UpperCAmelCase : np.ndarray = None , _UpperCAmelCase : int = 0 , _UpperCAmelCase : int = 0 , _UpperCAmelCase : int = None , _UpperCAmelCase : torch.Generator = None , _UpperCAmelCase : float = 0 , _UpperCAmelCase : float = 0 , _UpperCAmelCase : torch.Generator = None , _UpperCAmelCase : float = 0 , _UpperCAmelCase : torch.Tensor = None , _UpperCAmelCase : torch.Tensor = None , _UpperCAmelCase : str=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
"""simple docstring"""
__lowercase = steps or self.get_default_steps()
self.scheduler.set_timesteps(_UpperCAmelCase )
__lowercase = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
__lowercase = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
__lowercase = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=_UpperCAmelCase , device=self.device , )
__lowercase = noise
__lowercase = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(_UpperCAmelCase , _UpperCAmelCase )
__lowercase = self.mel.audio_slice_to_image(_UpperCAmelCase )
__lowercase = np.frombuffer(input_image.tobytes() , dtype='uint8' ).reshape(
(input_image.height, input_image.width) )
__lowercase = (input_image / 2_55) * 2 - 1
__lowercase = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
__lowercase = self.vqvae.encode(torch.unsqueeze(_UpperCAmelCase , 0 ) ).latent_dist.sample(
generator=_UpperCAmelCase )[0]
__lowercase = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
__lowercase = self.scheduler.add_noise(_UpperCAmelCase , _UpperCAmelCase , self.scheduler.timesteps[start_step - 1] )
__lowercase = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
__lowercase = int(mask_start_secs * pixels_per_second )
__lowercase = int(mask_end_secs * pixels_per_second )
__lowercase = self.scheduler.add_noise(_UpperCAmelCase , _UpperCAmelCase , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , _UpperCAmelCase ):
__lowercase = self.unet(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )['sample']
else:
__lowercase = self.unet(_UpperCAmelCase , _UpperCAmelCase )['sample']
if isinstance(self.scheduler , _UpperCAmelCase ):
__lowercase = self.scheduler.step(
model_output=_UpperCAmelCase , timestep=_UpperCAmelCase , sample=_UpperCAmelCase , eta=_UpperCAmelCase , generator=_UpperCAmelCase , )['prev_sample']
else:
__lowercase = self.scheduler.step(
model_output=_UpperCAmelCase , timestep=_UpperCAmelCase , sample=_UpperCAmelCase , generator=_UpperCAmelCase , )['prev_sample']
if mask is not None:
if mask_start > 0:
__lowercase = mask[:, step, :, :mask_start]
if mask_end > 0:
__lowercase = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
__lowercase = 1 / self.vqvae.config.scaling_factor * images
__lowercase = self.vqvae.decode(_UpperCAmelCase )['sample']
__lowercase = (images / 2 + 0.5).clamp(0 , 1 )
__lowercase = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
__lowercase = (images * 2_55).round().astype('uint8' )
__lowercase = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(_UpperCAmelCase , mode='RGB' ).convert('L' ) for _ in images) )
__lowercase = [self.mel.image_to_audio(_UpperCAmelCase ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(_UpperCAmelCase )[:, np.newaxis, :] ) , **ImagePipelineOutput(_UpperCAmelCase ) )
@torch.no_grad()
def a__ ( self : Any , _UpperCAmelCase : List[Image.Image] , _UpperCAmelCase : int = 50 ) -> np.ndarray:
"""simple docstring"""
assert isinstance(self.scheduler , _UpperCAmelCase )
self.scheduler.set_timesteps(_UpperCAmelCase )
__lowercase = np.array(
[np.frombuffer(image.tobytes() , dtype='uint8' ).reshape((1, image.height, image.width) ) for image in images] )
__lowercase = (sample / 2_55) * 2 - 1
__lowercase = torch.Tensor(_UpperCAmelCase ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
__lowercase = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
__lowercase = self.scheduler.alphas_cumprod[t]
__lowercase = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
__lowercase = 1 - alpha_prod_t
__lowercase = self.unet(_UpperCAmelCase , _UpperCAmelCase )['sample']
__lowercase = (1 - alpha_prod_t_prev) ** 0.5 * model_output
__lowercase = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
__lowercase = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def a__ ( _UpperCAmelCase : torch.Tensor , _UpperCAmelCase : torch.Tensor , _UpperCAmelCase : float ) -> torch.Tensor:
"""simple docstring"""
__lowercase = acos(torch.dot(torch.flatten(_UpperCAmelCase ) , torch.flatten(_UpperCAmelCase ) ) / torch.norm(_UpperCAmelCase ) / torch.norm(_UpperCAmelCase ) )
return sin((1 - alpha) * theta ) * xa / sin(_UpperCAmelCase ) + sin(alpha * theta ) * xa / sin(_UpperCAmelCase )
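# Hedged sketch (not part of the file above): the final @staticmethod implements
# spherical linear interpolation (slerp) between two equally-shaped tensors.
# Standalone re-statement of the same math, with placeholder names:
#
#   import torch
#   from math import acos, sin
#
#   def slerp(xa, xb, alpha):
#       theta = acos(torch.dot(torch.flatten(xa), torch.flatten(xb)) / torch.norm(xa) / torch.norm(xb))
#       return sin((1 - alpha) * theta) * xa / sin(theta) + sin(alpha * theta) * xb / sin(theta)
#
#   slerp(torch.tensor([1.0, 0.0]), torch.tensor([0.0, 1.0]), 0.5)  # ~[0.7071, 0.7071]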
| 325 | 0 |
import math


def perfect_square(num: int) -> bool:
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
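# Illustrative usage (not in the original file):
#   >>> perfect_square(9)
#   True
#   >>> perfect_square_binary_search(16)
#   True
#   >>> perfect_square_binary_search(26)
#   False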
| 259 |
from __future__ import annotations

# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1

        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."

    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)

    if result1 != -1:
        print(f"Iterative search: {target} found at positions: {result1}")
        print(f"Recursive search: {target} found at positions: {result2}")
    else:
        print("Not found")
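# Illustrative, non-interactive usage (not in the original file):
#   >>> sorted_list = list(range(0, 100, 3))
#   >>> ite_ternary_search(sorted_list, 36)
#   12
#   >>> rec_ternary_search(0, len(sorted_list) - 1, sorted_list, 36)
#   12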
| 325 | 0 |
"""simple docstring"""
from __future__ import annotations

from itertools import permutations
from random import randint
from timeit import repeat


def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-1_000, 1_000) for i in range(10)]
    r = randint(-5_000, 5_000)
    return (arr, r)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)


def solution_times() -> tuple[float, float]:
    setup_code = "\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n"
    test_code1 = "\ntriplet_sum1(*dataset)\n"
    test_code2 = "\ntriplet_sum2(*dataset)\n"
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10_000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10_000)
    return (min(times1), min(times2))


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    times = solution_times()
    print(f"The time for naive implementation is {times[0]}.")
    print(f"The time for optimized implementation is {times[1]}.")
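# Illustrative correctness check (not in the original file): both strategies
# should find the same triplet for a fixed input.
#   >>> triplet_sum1([13, 29, 7, 23, 5], 35)
#   (5, 7, 23)
#   >>> triplet_sum2([13, 29, 7, 23, 5], 35)
#   (5, 7, 23)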
| 332 |
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict ) -> List[str]:
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class A__ ( nn.Module ):
def __init__( self : Any , _UpperCAmelCase : nn.Module , _UpperCAmelCase : int ) -> Optional[int]:
"""simple docstring"""
super().__init__()
__lowercase = module
__lowercase = nn.Sequential(
nn.Linear(module.in_features , _UpperCAmelCase , bias=_UpperCAmelCase ) , nn.Linear(_UpperCAmelCase , module.out_features , bias=_UpperCAmelCase ) , )
__lowercase = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=_UpperCAmelCase )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def a__ ( self : str , _UpperCAmelCase : List[str] , *_UpperCAmelCase : List[Any] , **_UpperCAmelCase : List[str] ) -> Optional[Any]:
"""simple docstring"""
return self.module(_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ) + self.adapter(_UpperCAmelCase )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class A__ ( unittest.TestCase ):
    # We keep the constants inside the init function and model loading inside setUp function

    # We need to test on relatively large models (aka >1b parameters), otherwise the quantization may not work as expected
    # Therefore here we use only bloom-1b7 to test our module
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574

    input_text = "Hello my name is"

    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")

    MAX_NEW_TOKENS = 10
def a__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
__lowercase = AutoTokenizer.from_pretrained(self.model_name )
class A__ ( lowerCAmelCase__ ):
def a__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
super().setUp()
# Models and tokenizer
__lowercase = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='auto' )
__lowercase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
def a__ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def a__ ( self : str ) -> int:
"""simple docstring"""
__lowercase = self.model_abit.config
self.assertTrue(hasattr(_UpperCAmelCase , 'quantization_config' ) )
__lowercase = config.to_dict()
__lowercase = config.to_diff_dict()
__lowercase = config.to_json_string()
def a__ ( self : Dict ) -> Tuple:
"""simple docstring"""
from bitsandbytes.nn import Paramsabit
__lowercase = self.model_fpaa.get_memory_footprint()
__lowercase = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
__lowercase = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(_UpperCAmelCase , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def a__ ( self : List[str] ) -> str:
"""simple docstring"""
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' )
__lowercase = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_UpperCAmelCase ) , self.EXPECTED_OUTPUTS )
def a__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase = BitsAndBytesConfig()
__lowercase = True
__lowercase = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=_UpperCAmelCase , device_map='auto' )
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' )
__lowercase = model_abit_from_config.generate(
input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_UpperCAmelCase ) , self.EXPECTED_OUTPUTS )
def a__ ( self : str ) -> List[str]:
"""simple docstring"""
with self.assertRaises(_UpperCAmelCase ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(_UpperCAmelCase )
def a__ ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = BitsAndBytesConfig()
with self.assertRaises(_UpperCAmelCase ):
__lowercase = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=_UpperCAmelCase , load_in_abit=_UpperCAmelCase , device_map='auto' , bnb_abit_quant_type='nf4' , )
def a__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
with self.assertRaises(_UpperCAmelCase ):
# Tries with `str`
self.model_abit.to('cpu' )
with self.assertRaises(_UpperCAmelCase ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(_UpperCAmelCase ):
# Tries with a `device`
self.model_abit.to(torch.device('cuda:0' ) )
with self.assertRaises(_UpperCAmelCase ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(_UpperCAmelCase ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' )
__lowercase = self.model_fpaa.to(torch.floataa )
__lowercase = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
__lowercase = self.model_fpaa.to('cpu' )
# Check this does not throw an error
__lowercase = self.model_fpaa.half()
# Check this does not throw an error
__lowercase = self.model_fpaa.float()
def a__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=_UpperCAmelCase , device_map='auto' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class A__ ( unittest.TestCase ):
@classmethod
def a__ ( cls : int ) -> Tuple:
"""simple docstring"""
__lowercase = 't5-small'
__lowercase = 'google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense
__lowercase = AutoTokenizer.from_pretrained(cls.model_name )
__lowercase = 'Translate in German: Hello, my dog is cute'
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
gc.collect()
torch.cuda.empty_cache()
def a__ ( self : int ) -> int:
"""simple docstring"""
from transformers import TaForConditionalGeneration
__lowercase = TaForConditionalGeneration._keep_in_fpaa_modules
__lowercase = None
# test with `t5-small`
__lowercase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__lowercase = model.generate(**_UpperCAmelCase )
# test with `flan-t5-small`
__lowercase = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__lowercase = model.generate(**_UpperCAmelCase )
__lowercase = modules
def a__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
__lowercase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__lowercase = model.generate(**_UpperCAmelCase )
# test with `flan-t5-small`
__lowercase = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__lowercase = model.generate(**_UpperCAmelCase )
class A__ ( lowerCAmelCase__ ):
def a__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
super().setUp()
# model_name
__lowercase = 'bigscience/bloom-560m'
__lowercase = 't5-small'
# Different types of model
__lowercase = AutoModel.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
# Sequence classification model
__lowercase = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
# CausalLM model
__lowercase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
# Seq2seq model
__lowercase = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
def a__ ( self : int ) -> List[str]:
"""simple docstring"""
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class A__ ( lowerCAmelCase__ ):
def a__ ( self : str ) -> str:
"""simple docstring"""
super().setUp()
def a__ ( self : Dict ) -> Any:
"""simple docstring"""
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def a__ ( self : Tuple ) -> int:
"""simple docstring"""
__lowercase = pipeline(
'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
__lowercase = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class A__ ( lowerCAmelCase__ ):
def a__ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
super().setUp()
def a__ ( self : List[Any] ) -> int:
"""simple docstring"""
__lowercase = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=_UpperCAmelCase , device_map='balanced' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' )
# Second real batch
__lowercase = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=_UpperCAmelCase ) , self.EXPECTED_OUTPUTS )
class A__ ( lowerCAmelCase__ ):
def a__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = 'facebook/opt-350m'
super().setUp()
def a__ ( self : Dict ) -> List[str]:
"""simple docstring"""
if version.parse(importlib.metadata.version('bitsandbytes' ) ) < version.parse('0.37.0' ):
return
# Step 1: freeze all parameters
__lowercase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
__lowercase = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
__lowercase = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(_UpperCAmelCase ) ):
__lowercase = LoRALayer(module.q_proj , rank=16 )
__lowercase = LoRALayer(module.k_proj , rank=16 )
__lowercase = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
__lowercase = self.tokenizer('Test batch ' , return_tensors='pt' ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
__lowercase = model.forward(**_UpperCAmelCase )
out.logits.norm().backward()
for module in model.modules():
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(_UpperCAmelCase , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class A__ ( lowerCAmelCase__ ):
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
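# Hedged usage sketch (not part of the tests above; requires a CUDA GPU and the
# bitsandbytes package). The 4-bit path exercised by these tests is normally
# driven through BitsAndBytesConfig:
#
#   import torch
#   from transformers import AutoModelForCausalLM, BitsAndBytesConfig
#
#   quantization_config = BitsAndBytesConfig(
#       load_in_4bit=True,
#       bnb_4bit_quant_type="nf4",
#       bnb_4bit_compute_dtype=torch.float16,
#   )
#   model = AutoModelForCausalLM.from_pretrained(
#       "bigscience/bloom-1b7",
#       quantization_config=quantization_config,
#       device_map="auto",
#   )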
| 325 | 0 |
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def __a(SCREAMING_SNAKE_CASE_ : Dict ):
'''simple docstring'''
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class lowerCAmelCase_ ( nn.Module ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[int]:
super().__init__()
_lowerCAmelCase = module
_lowerCAmelCase = nn.Sequential(
nn.Linear(module.in_features , _UpperCAmelCase , bias=_UpperCAmelCase ) , nn.Linear(_UpperCAmelCase , module.out_features , bias=_UpperCAmelCase ) , )
_lowerCAmelCase = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=_UpperCAmelCase )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def _snake_case ( self , _lowerCAmelCase , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
return self.module(_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ) + self.adapter(_UpperCAmelCase )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
    # We keep the constants inside the init function and model loading inside setUp function

    # We need to test on relatively large models (aka >1b parameters), otherwise the quantization may not work as expected
    # Therefore here we use only bloom-1b7 to test our module
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574

    input_text = "Hello my name is"

    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")

    MAX_NEW_TOKENS = 10
def _snake_case ( self ) -> List[Any]:
_lowerCAmelCase = AutoTokenizer.from_pretrained(self.model_name )
class lowerCAmelCase_ ( lowerCAmelCase__ ):
def _snake_case ( self ) -> Union[str, Any]:
super().setUp()
# Models and tokenizer
_lowerCAmelCase = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map="auto" )
_lowerCAmelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map="auto" )
def _snake_case ( self ) -> Optional[Any]:
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self ) -> int:
_lowerCAmelCase = self.model_abit.config
self.assertTrue(hasattr(_UpperCAmelCase , "quantization_config" ) )
_lowerCAmelCase = config.to_dict()
_lowerCAmelCase = config.to_diff_dict()
_lowerCAmelCase = config.to_json_string()
def _snake_case ( self ) -> Tuple:
from bitsandbytes.nn import Paramsabit
_lowerCAmelCase = self.model_fpaa.get_memory_footprint()
_lowerCAmelCase = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
_lowerCAmelCase = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def _snake_case ( self ) -> str:
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(_UpperCAmelCase , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def _snake_case ( self ) -> str:
_lowerCAmelCase = self.tokenizer(self.input_text , return_tensors="pt" )
_lowerCAmelCase = self.model_abit.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_UpperCAmelCase ) , self.EXPECTED_OUTPUTS )
def _snake_case ( self ) -> str:
_lowerCAmelCase = BitsAndBytesConfig()
_lowerCAmelCase = True
_lowerCAmelCase = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=_UpperCAmelCase , device_map="auto" )
_lowerCAmelCase = self.tokenizer(self.input_text , return_tensors="pt" )
_lowerCAmelCase = model_abit_from_config.generate(
input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_UpperCAmelCase ) , self.EXPECTED_OUTPUTS )
def _snake_case ( self ) -> List[str]:
with self.assertRaises(_UpperCAmelCase ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(_UpperCAmelCase )
def _snake_case ( self ) -> Optional[Any]:
_lowerCAmelCase = BitsAndBytesConfig()
with self.assertRaises(_UpperCAmelCase ):
_lowerCAmelCase = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=_UpperCAmelCase , load_in_abit=_UpperCAmelCase , device_map="auto" , bnb_abit_quant_type="nf4" , )
def _snake_case ( self ) -> Tuple:
with self.assertRaises(_UpperCAmelCase ):
# Tries with `str`
self.model_abit.to("cpu" )
with self.assertRaises(_UpperCAmelCase ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(_UpperCAmelCase ):
# Tries with a `device`
self.model_abit.to(torch.device("cuda:0" ) )
with self.assertRaises(_UpperCAmelCase ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(_UpperCAmelCase ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
_lowerCAmelCase = self.tokenizer(self.input_text , return_tensors="pt" )
_lowerCAmelCase = self.model_fpaa.to(torch.floataa )
_lowerCAmelCase = self.model_fpaa.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
_lowerCAmelCase = self.model_fpaa.to("cpu" )
# Check this does not throw an error
_lowerCAmelCase = self.model_fpaa.half()
# Check this does not throw an error
_lowerCAmelCase = self.model_fpaa.float()
def _snake_case ( self ) -> Union[str, Any]:
_lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained("t5-small" , load_in_abit=_UpperCAmelCase , device_map="auto" )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
@classmethod
def _snake_case ( cls ) -> Tuple:
_lowerCAmelCase = "t5-small"
_lowerCAmelCase = "google/flan-t5-small" # flan-t5 uses dense-act instead of dense-relu-dense
_lowerCAmelCase = AutoTokenizer.from_pretrained(cls.model_name )
_lowerCAmelCase = "Translate in German: Hello, my dog is cute"
def _snake_case ( self ) -> Dict:
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self ) -> int:
from transformers import TaForConditionalGeneration
_lowerCAmelCase = TaForConditionalGeneration._keep_in_fpaa_modules
_lowerCAmelCase = None
# test with `t5-small`
_lowerCAmelCase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map="auto" )
_lowerCAmelCase = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
_lowerCAmelCase = model.generate(**_UpperCAmelCase )
# test with `flan-t5-small`
_lowerCAmelCase = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=_UpperCAmelCase , device_map="auto" )
_lowerCAmelCase = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
_lowerCAmelCase = model.generate(**_UpperCAmelCase )
_lowerCAmelCase = modules
def _snake_case ( self ) -> Optional[Any]:
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
_lowerCAmelCase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map="auto" )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
_lowerCAmelCase = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
_lowerCAmelCase = model.generate(**_UpperCAmelCase )
# test with `flan-t5-small`
_lowerCAmelCase = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=_UpperCAmelCase , device_map="auto" )
_lowerCAmelCase = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
_lowerCAmelCase = model.generate(**_UpperCAmelCase )
class lowerCAmelCase_ ( lowerCAmelCase__ ):
def _snake_case ( self ) -> Any:
super().setUp()
# model_name
_lowerCAmelCase = "bigscience/bloom-560m"
_lowerCAmelCase = "t5-small"
# Different types of model
_lowerCAmelCase = AutoModel.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map="auto" )
# Sequence classification model
_lowerCAmelCase = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=_UpperCAmelCase , device_map="auto" )
# CausalLM model
_lowerCAmelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map="auto" )
# Seq2seq model
_lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=_UpperCAmelCase , device_map="auto" )
def _snake_case ( self ) -> List[str]:
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self ) -> str:
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class lowerCAmelCase_ ( lowerCAmelCase__ ):
def _snake_case ( self ) -> str:
super().setUp()
def _snake_case ( self ) -> Any:
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self ) -> int:
_lowerCAmelCase = pipeline(
"text-generation" , model=self.model_name , model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
_lowerCAmelCase = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]["generated_text"] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class lowerCAmelCase_ ( lowerCAmelCase__ ):
def _snake_case ( self ) -> Optional[int]:
super().setUp()
def _snake_case ( self ) -> int:
_lowerCAmelCase = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=_UpperCAmelCase , device_map="balanced" )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
_lowerCAmelCase = self.tokenizer(self.input_text , return_tensors="pt" )
# Second real batch
_lowerCAmelCase = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=_UpperCAmelCase ) , self.EXPECTED_OUTPUTS )
class lowerCAmelCase_ ( lowerCAmelCase__ ):
def _snake_case ( self ) -> Union[str, Any]:
_lowerCAmelCase = "facebook/opt-350m"
super().setUp()
def _snake_case ( self ) -> List[str]:
if version.parse(importlib.metadata.version("bitsandbytes" ) ) < version.parse("0.37.0" ):
return
# Step 1: freeze all parameters
_lowerCAmelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
_lowerCAmelCase = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
_lowerCAmelCase = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(_UpperCAmelCase ) ):
_lowerCAmelCase = LoRALayer(module.q_proj , rank=16 )
_lowerCAmelCase = LoRALayer(module.k_proj , rank=16 )
_lowerCAmelCase = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
_lowerCAmelCase = self.tokenizer("Test batch " , return_tensors="pt" ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
_lowerCAmelCase = model.forward(**_UpperCAmelCase )
out.logits.norm().backward()
for module in model.modules():
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(_UpperCAmelCase , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class lowerCAmelCase_ ( lowerCAmelCase__ ):
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
| 158 |
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": TFConvBertModel,
"fill-mask": TFConvBertForMaskedLM,
"question-answering": TFConvBertForQuestionAnswering,
"text-classification": TFConvBertForSequenceClassification,
"token-classification": TFConvBertForTokenClassification,
"zero-shot": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow
def a__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = True
__lowercase = True
if hasattr(_UpperCAmelCase , 'use_cache' ):
__lowercase = True
__lowercase = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
__lowercase = getattr(self.model_tester , 'key_length' , _UpperCAmelCase )
for model_class in self.all_model_classes:
__lowercase = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase )
__lowercase = model_class(_UpperCAmelCase )
__lowercase = len(model(_UpperCAmelCase ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_UpperCAmelCase , saved_model=_UpperCAmelCase )
__lowercase = os.path.join(_UpperCAmelCase , 'saved_model' , '1' )
__lowercase = tf.keras.models.load_model(_UpperCAmelCase )
__lowercase = model(_UpperCAmelCase )
if self.is_encoder_decoder:
__lowercase = outputs['encoder_hidden_states']
__lowercase = outputs['encoder_attentions']
else:
__lowercase = outputs['hidden_states']
__lowercase = outputs['attentions']
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
__lowercase = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)
def a__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = True
__lowercase = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length )
__lowercase = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
__lowercase = getattr(self.model_tester , 'key_length' , _UpperCAmelCase )
__lowercase = getattr(self.model_tester , 'key_length' , _UpperCAmelCase )
def check_decoder_attentions_output(_UpperCAmelCase : int ):
__lowercase = len(_UpperCAmelCase )
self.assertEqual(out_len % 2 , 0 )
__lowercase = outputs.decoder_attentions
self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(_UpperCAmelCase : Union[str, Any] ):
__lowercase = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
__lowercase = True
__lowercase = False
__lowercase = model_class(_UpperCAmelCase )
__lowercase = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
__lowercase = len(_UpperCAmelCase )
self.assertEqual(config.output_hidden_states , _UpperCAmelCase )
check_encoder_attentions_output(_UpperCAmelCase )
if self.is_encoder_decoder:
__lowercase = model_class(_UpperCAmelCase )
__lowercase = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , _UpperCAmelCase )
check_decoder_attentions_output(_UpperCAmelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__lowercase = True
__lowercase = model_class(_UpperCAmelCase )
__lowercase = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , _UpperCAmelCase )
check_encoder_attentions_output(_UpperCAmelCase )
# Check attention is always last and order is fine
__lowercase = True
__lowercase = True
__lowercase = model_class(_UpperCAmelCase )
__lowercase = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_UpperCAmelCase ) )
self.assertEqual(model.config.output_hidden_states , _UpperCAmelCase )
check_encoder_attentions_output(_UpperCAmelCase )
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
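

if __name__ == "__main__":
    # Minimal inference sketch (illustrative; not part of the test suite above).
    # It assumes the standard `transformers` auto classes and downloads the same
    # checkpoint the integration test uses.
    from transformers import AutoTokenizer, TFConvBertForMaskedLM

    tokenizer = AutoTokenizer.from_pretrained("YituTech/conv-bert-base")
    mlm_model = TFConvBertForMaskedLM.from_pretrained("YituTech/conv-bert-base")
    encoded = tokenizer("ConvBERT mixes self-attention with convolution.", return_tensors="tf")
    mlm_logits = mlm_model(**encoded).logits  # shape: (batch, sequence_length, vocab_size)
    print(mlm_logits.shape)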
| 325 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig(PretrainedConfig):
    """Configuration class to store the configuration of a RoFormer model."""

    model_type = "roformer"

    def __init__(
        self,
        vocab_size=50_000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache
class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
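

if __name__ == "__main__":
    # Illustrative sketch: build a default config and inspect the dynamic-axis
    # specification the ONNX export would use. Only the classes defined above
    # (plus `OnnxConfig`'s standard constructor) are assumed.
    config = RoFormerConfig()
    onnx_config = RoFormerOnnxConfig(config, task="multiple-choice")
    print(onnx_config.inputs)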
| 17 |
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("""ignore""", category=UserWarning, module="""torch.optim.lr_scheduler""")
class AcceleratedScheduler:
    """
    A wrapper around a learning rate scheduler that only steps when the optimizer(s) actually
    stepped, so that skipped steps (e.g. under mixed-precision gradient scaling) do not advance
    the schedule.
    """

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    # Passthroughs to the wrapped scheduler
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
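

if __name__ == "__main__":
    # Minimal sketch of the wrapper in isolation (illustrative; in practice the
    # wrapper is created for you by `Accelerator.prepare`).
    import torch

    param = torch.nn.Parameter(torch.zeros(1))
    optimizer = torch.optim.SGD([param], lr=0.1)
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10)

    scheduler = AcceleratedScheduler(lr_scheduler, optimizer, step_with_optimizer=False)
    scheduler.step()  # forwards straight to StepLR.step() since step_with_optimizer=False
    print(scheduler.get_last_lr())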
| 325 | 0 |
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    # Sieve of Eratosthenes: collect every prime strictly below max_number.
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False

    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800_800, degree: int = 800_800) -> int:
    # Count hybrid-integers p**q * q**p <= base**degree by comparing base-2 logarithms
    # with a two-pointer sweep over the primes.
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count


if __name__ == "__main__":
    print(f"{solution() = }")
| 195 |
import collections
import importlib.util
import os
import re
from pathlib import Path
SCREAMING_SNAKE_CASE__ = """src/transformers"""
# Matches is_xxx_available()
SCREAMING_SNAKE_CASE__ = re.compile(r"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
SCREAMING_SNAKE_CASE__ = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
SCREAMING_SNAKE_CASE__ = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
SCREAMING_SNAKE_CASE__ = re.compile("""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
SCREAMING_SNAKE_CASE__ = re.compile("""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
SCREAMING_SNAKE_CASE__ = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*try:""")
# Catches a line with else:
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*else:""")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an init_file and parse (per backend) the `_import_structure` objects defined and the
    `TYPE_CHECKING` objects defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f" {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f" {a} in _import_structure but not in TYPE_HINT.")
    return errors
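

# Worked example (comment only): with `{"none": ["A"]}` as the import structure and
# `{"none": ["A", "B"]}` under TYPE_CHECKING, `analyze_results` returns
#     ["Differences for base imports:", " B in TYPE_HINT but not in _import_structure."]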
def check_all_inits():
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 325 | 0 |
def solution(n: int = 1_000) -> int:
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))


if __name__ == "__main__":
    print(solution())
 | 282 |
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    """
    An adapter to assist with logging in multiprocess: calls go through to the underlying
    logger only on processes selected via `main_process_only` / `in_order`.
    """

    @staticmethod
    def _should_log(main_process_only):
        """Check if log should be performed on this process."""
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        """
        Delegates the logger call after checking whether this process should log. Accepts two
        extra kwargs: `main_process_only` (default `True`) and `in_order` (default `False`,
        logs on every process one rank at a time).
        """
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or "
                "`Accelerator()` before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)

        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name: str, log_level: str = None):
    """Returns a `logging.Logger` wrapped in a `MultiProcessAdapter` for `name`."""
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
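

if __name__ == "__main__":
    # Usage sketch (illustrative): inside an `accelerate launch` script the state is
    # normally created by `Accelerator()`; a bare `PartialState()` works for a demo.
    PartialState()
    logger = get_logger(__name__, log_level="INFO")
    logger.info("printed on the main process only")
    logger.info("printed by every process, in rank order", main_process_only=False, in_order=True)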
| 325 | 0 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Creates a beta schedule that discretizes the given alpha_t_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
    """Scheduler implementing Heun steps (Algorithm 2) from Karras et al. (2022)."""

    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2
@register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        use_karras_sigmas: Optional[bool] = False,
        clip_sample: Optional[bool] = False,
        clip_sample_range: float = 1.0,
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="cosine")
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="exp")
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
        self.use_karras_sigmas = use_karras_sigmas
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()
    @property
    def init_noise_sigma(self):
'''simple docstring'''
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
    def scale_model_input(self, sample: torch.FloatTensor, timestep) -> torch.FloatTensor:
        """Scales the model input by `1 / sqrt(sigma**2 + 1)` for the current timestep."""
        step_index = self.index_for_timestep(timestep)

        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(self, num_inference_steps, device=None, num_train_timesteps=None):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        log_sigmas = np.log(sigmas)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)

        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
            timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])

        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])

        timesteps = torch.from_numpy(timesteps)
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])

        if str(device).startswith("mps"):
            # mps does not support float64
            self.timesteps = timesteps.to(device, dtype=torch.float32)
        else:
            self.timesteps = timesteps.to(device=device)

        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def _sigma_to_t(self, sigma, log_sigmas):
        # get log sigma
        log_sigma = np.log(sigma)

        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]

        # get sigmas range
        low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w, 0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape)
        return t
    def _convert_to_karras(self, in_sigmas, num_inference_steps) -> torch.FloatTensor:
        """Constructs the noise schedule of Karras et al. (2022)."""
        sigma_min = in_sigmas[-1].item()
        sigma_max = in_sigmas[0].item()

        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0, 1, num_inference_steps)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas
    @property
    def state_in_first_order(self):
        """Whether the scheduler is in the first of Heun's two sub-steps (no `dt` stored yet)."""
return self.dt is None
    def step(self, model_output, timestep, sample, return_dict: bool = True) -> Union[SchedulerOutput, Tuple]:
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2

            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample

            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(self, original_samples, noise, timesteps) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples
def __len__( self )-> List[Any]:
'''simple docstring'''
return self.config.num_train_timesteps
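

if __name__ == "__main__":
    # Denoising-loop sketch (illustrative): a constant-zero "model" stands in for a
    # real UNet so the scheduler mechanics can run end to end on CPU.
    scheduler = HeunDiscreteScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(num_inference_steps=10)

    sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        noise_pred = torch.zeros_like(model_input)  # placeholder for a model call
        sample = scheduler.step(noise_pred, t, sample).prev_sample
    print(sample.shape)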
| 340 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )

    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name

    return name
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on

    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )

    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])

    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""focalnet-tiny""",
type=str,
help="""Name of the FocalNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub.""",
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
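

# Example invocation (illustrative; the script filename is an assumption):
#
#     python convert_focalnet_to_hf_format.py \
#         --model_name focalnet-tiny \
#         --pytorch_dump_folder_path ./focalnet-tiny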
| 325 | 0 |
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {"""vocab_file""": """spiece.model"""}
_SCREAMING_SNAKE_CASE = {
"""vocab_file""": {
"""AI-Sweden/gpt-sw3-126m""": """https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-350m""": """https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-1.6b""": """https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-6.7b""": """https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-20b""": """https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model""",
}
}
_SCREAMING_SNAKE_CASE = {
"""AI-Sweden/gpt-sw3-126m""": 20_48,
"""AI-Sweden/gpt-sw3-350m""": 20_48,
"""AI-Sweden/gpt-sw3-1.6b""": 20_48,
"""AI-Sweden/gpt-sw3-6.7b""": 20_48,
"""AI-Sweden/gpt-sw3-20b""": 20_48,
}
class GPTSw3Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=False,
        keep_accents=False,
        pad_token=None,
        unk_token=None,
        eos_token=None,
        bos_token=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        name_or_path = kwargs.get("name_or_path")
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " you are testing the model, this can safely be ignored"
            )
            name_or_path = "None"

        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = "<|endoftext|>" if eos_token is None else eos_token
        unk_token = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = "<pad>" if pad_token is None else pad_token
            bos_token = "<s>" if bos_token is None else bos_token

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # Used for whitespace normalization in input texts
        # fmt: off
        self.whitespaces = {' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '', ''}
        # fmt: on

        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]"
        )
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size(self) -> int:
        return len(self.sp_model)
    def preprocess_text(self, text: str) -> str:
        """
        Returns the preprocessed text. This procedure is identical to what was used when training the tokenizer.
        """
        # Remove non-printing characters
        text = self.non_printing_characters_re.sub("", text)

        # Normalize whitespaces
        text = "".join([char if char not in self.whitespaces else " " for char in text])

        # NFC Unicode normalization
        text = unicodedata.normalize("NFC", text)
        return text

    def _tokenize(self, text: str, **kwargs) -> List[str]:
        text = self.preprocess_text(text)
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id (int) using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (int) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    @staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        """Returns the input string; overridden to remove the default clean-up."""
        return out_string
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings) to a single string. Special tokens remain intact."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "

                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)

        return out_string
    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
    def encode_fast(
        self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False
    ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        """
        Encodes a text or batch of texts to token ids using preprocessing and the raw SP tokenizer. This has reduced
        functionality but is often much faster.
        """
        if isinstance(text, str):
            text = self.preprocess_text(text)
            token_ids = self.sp_model.encode(text)
        else:
            text = [self.preprocess_text(t) for t in text]
            token_ids = self.sp_model.encode(text)

        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids)

        return token_ids

    def decode_fast(self, token_ids: Union[int, List[int]]) -> str:
        """Decodes token ids with the raw SP tokenizer, skipping the usual post-processing."""
        return self.sp_model.decode(token_ids)
def UpperCAmelCase_ ( self : Optional[int] , _A : "Conversation" ) -> List[int]:
"""simple docstring"""
snake_case_ : Dict = [F"""User: {text}""" if is_user else F"""Bot: {text}""" for is_user, text in conversation.iter_texts()]
snake_case_ : Any = (
F"""{self.eos_token}{self.bos_token}""" + F"""{self.bos_token}""".join(_UpperCAmelCase ) + F"""{self.bos_token}Bot:"""
)
return self.encode(text=_UpperCAmelCase )
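

# Usage sketch (illustrative; requires the SentencePiece vocabulary from the hub):
#
#     tokenizer = GPTSw3Tokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
#     ids = tokenizer.encode_fast("Träd är fina")
#     print(tokenizer.decode_fast(ids))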
| 327 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE__ = {
"""facebook/mask2former-swin-small-coco-instance""": (
"""https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"""
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class Mask2FormerConfig(PretrainedConfig):
    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}

    def __init__(
        self,
        backbone_config: Optional[Dict] = None,
        feature_size: int = 256,
        mask_feature_size: int = 256,
        hidden_dim: int = 256,
        encoder_feedforward_dim: int = 1024,
        activation_function: str = "relu",
        encoder_layers: int = 6,
        decoder_layers: int = 10,
        num_attention_heads: int = 8,
        dropout: float = 0.0,
        dim_feedforward: int = 2048,
        pre_norm: bool = False,
        enforce_input_projection: bool = False,
        common_stride: int = 4,
        ignore_value: int = 255,
        num_queries: int = 100,
        no_object_weight: float = 0.1,
        class_weight: float = 2.0,
        mask_weight: float = 5.0,
        dice_weight: float = 5.0,
        train_num_points: int = 12544,
        oversample_ratio: float = 3.0,
        importance_sample_ratio: float = 0.75,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        use_auxiliary_loss: bool = True,
        feature_strides: List[int] = [4, 8, 16, 32],
        output_auxiliary_logits: bool = None,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.")
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224,
                in_channels=3,
                patch_size=4,
                embed_dim=96,
                depths=[2, 2, 18, 2],
                num_heads=[3, 6, 12, 24],
                window_size=7,
                drop_path_rate=0.3,
                use_absolute_embeddings=False,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers

        super().__init__(**kwargs)
@classmethod
def a__ ( cls : Union[str, Any] , _UpperCAmelCase : PretrainedConfig , **_UpperCAmelCase : Optional[int] ) -> Dict:
"""simple docstring"""
return cls(
backbone_config=_UpperCAmelCase , **_UpperCAmelCase , )
def a__ ( self : str ) -> Dict[str, any]:
"""simple docstring"""
__lowercase = copy.deepcopy(self.__dict__ )
__lowercase = self.backbone_config.to_dict()
__lowercase = self.__class__.model_type
return output
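

# Hedged usage sketch (run outside this module, which uses package-relative
# imports; the printed values follow from the defaults above):
#
#   from transformers import Mask2FormerConfig
#
#   cfg = Mask2FormerConfig()                 # builds a default Swin backbone config
#   print(cfg.backbone_config.model_type)     # "swin"
#   print(cfg.hidden_dim, cfg.num_queries)    # 256 100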
| 325 | 0 |
from ...processing_utils import ProcessorMixin
class WhisperProcessor(ProcessorMixin):
    feature_extractor_class = "WhisperFeatureExtractor"
    tokenizer_class = "WhisperTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def get_prompt_ids(self, text: str, return_tensors="np"):
        return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors)
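

# Hedged usage sketch (run outside this module; the checkpoint name
# "openai/whisper-tiny" and the dummy-audio shape are assumptions, not taken
# from this file):
#
#   import numpy as np
#   from transformers import WhisperProcessor
#
#   processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
#   features = processor(audio=np.zeros(16_000, dtype=np.float32), sampling_rate=16_000, return_tensors="pt")
#   print(features.input_features.shape)  # log-mel spectrogram, e.g. torch.Size([1, 80, 3000])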
| 279 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}

    logger.info(f"Loading tokenizer classes: {tokenizer_names}")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")

        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}")

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"=> File names {file_names}")

            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output generated fast tokenizer files."""
)
parser.add_argument(
"""--tokenizer_name""",
default=None,
type=str,
help=(
F'''Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will '''
"""download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--checkpoint_name""",
default=None,
type=str,
help="""Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.""",
)
parser.add_argument(
"""--force_download""",
action="""store_true""",
help="""Re-download checkpoints.""",
)
    args = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
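
# Example invocation (the checkpoint "bert-base-uncased" is an assumed,
# publicly available name; paths are placeholders):
#
#   python convert_slow_tokenizers_checkpoints_to_fast.py \
#       --tokenizer_name BertTokenizer \
#       --checkpoint_name bert-base-uncased \
#       --dump_path ./fast_tokenizers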
| 325 | 0 |
"""simple docstring"""
from pathlib import Path
import fire
from tqdm import tqdm
def __UpperCAmelCase ( lowercase="ro" ,lowercase="en" ,lowercase="wmt16" ,lowercase=None ):
"""simple docstring"""
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError("""run pip install datasets""" )
_UpperCAmelCase = f'''{src_lang}-{tgt_lang}'''
print(f'''Converting {dataset}-{pair}''' )
_UpperCAmelCase = datasets.load_dataset(lowercase ,lowercase )
if save_dir is None:
_UpperCAmelCase = f'''{dataset}-{pair}'''
_UpperCAmelCase = Path(lowercase )
save_dir.mkdir(exist_ok=lowercase )
for split in ds.keys():
print(f'''Splitting {split} with {ds[split].num_rows} records''' )
# to save to val.source, val.target like summary datasets
_UpperCAmelCase = """val""" if split == """validation""" else split
_UpperCAmelCase = save_dir.joinpath(f'''{fn}.source''' )
_UpperCAmelCase = save_dir.joinpath(f'''{fn}.target''' )
_UpperCAmelCase = src_path.open("""w+""" )
_UpperCAmelCase = tgt_path.open("""w+""" )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
_UpperCAmelCase = x["""translation"""]
src_fp.write(ex[src_lang] + """\n""" )
tgt_fp.write(ex[tgt_lang] + """\n""" )
print(f'''Saved {dataset} dataset to {save_dir}''' )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
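
# `fire.Fire` exposes the function above as a CLI, so a typical run looks like
# (availability of the dataset/pair on the Hub is assumed):
#
#   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir wmt16-ro-en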
| 289 |
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Sieve of Eratosthenes: return all primes strictly below ``max_number``."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800800, degree: int = 800800) -> int:
    """Count hybrid integers p^q * q^p <= base^degree using a two-pointer scan
    over the sorted primes (the condition is q*log2(p) + p*log2(q) <= degree*log2(base))."""
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count
if __name__ == "__main__":
print(F'''{solution() = }''')
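

def _brute_force_count(base: int, degree: int) -> int:
    """Cross-check sketch (not part of the original solution): enumerate prime
    pairs directly instead of using the two-pointer scan."""
    bound = degree * log2(base)
    primes = calculate_prime_numbers(int(bound))
    return sum(
        1
        for i, p in enumerate(primes)
        for q in primes[i + 1 :]
        if q * log2(p) + p * log2(q) <= bound
    )


if __name__ == "__main__":
    # For the small, arbitrary bound base = degree = 10 both counts should be 13.
    assert _brute_force_count(10, 10) == solution(10, 10) == 13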
| 325 | 0 |
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    """Launch a training function from a notebook, on TPU, multi-GPU, one GPU, or CPU."""
    # Are we in a google colab or a Kaggle Kernel?
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE") for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())

    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}."
        )

    if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp

        if len(AcceleratorState._shared_state) > 0:
            raise ValueError(
                "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
                "your training function. Restart your notebook and make sure no cells initializes an "
                "`Accelerator`."
            )
        if num_processes is None:
            num_processes = 8

        launcher = PrepareForLaunch(function, distributed_type="TPU")
        print(f"Launching a training on {num_processes} TPU cores.")
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
    elif in_colab:
        # No need for a distributed launch otherwise as it's either CPU or one GPU.
        if torch.cuda.is_available():
            print("Launching training on one GPU.")
        else:
            print("Launching training on one CPU.")
        function(*args)
    else:
        if num_processes is None:
            raise ValueError(
                "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call."
            )
        if num_processes > 1:
            # Multi-GPU launch
            from torch.multiprocessing import start_processes
            from torch.multiprocessing.spawn import ProcessRaisedException

            if len(AcceleratorState._shared_state) > 0:
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
                    "inside your training function. Restart your notebook and make sure no cells initializes an "
                    "`Accelerator`."
                )
            if torch.cuda.is_initialized():
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
                    "using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
                    "function."
                )
            # torch.distributed will expect a few environment variable to be here. We set the ones common to each
            # process here (the other ones will be set be the launcher).
            with patch_environment(
                world_size=num_processes, master_addr="127.0.01", master_port=use_port, mixed_precision=mixed_precision
            ):
                launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")
                print(f"Launching training on {num_processes} GPUs.")
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
                except ProcessRaisedException as e:
                    if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
                        raise RuntimeError(
                            "CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
                            "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
                            "Please review your imports and test them when running the `notebook_launcher()` to identify "
                            "which one is problematic."
                        ) from e
        else:
            # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
                print("Launching training on MPS.")
            elif torch.cuda.is_available():
                print("Launching training on one GPU.")
            else:
                print("Launching training on CPU.")
            function(*args)


def debug_launcher(function, args=(), num_processes=2):
    """Launch a training function using several processes on CPU for debugging purposes."""
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variable to be here. We set the ones common to each
        # process here (the other ones will be set be the launcher).
        with patch_environment(
            world_size=num_processes,
            master_addr="127.0.01",
            master_port="29500",
            accelerate_mixed_precision="no",
            accelerate_debug_rdv_file=tmp_file.name,
            accelerate_use_cpu="yes",
        ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
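

# Hedged usage sketch (run from a notebook or script, not from inside this
# module; `num_processes=1` simply calls the function in-process outside
# Colab/Kaggle):
#
#   from accelerate import notebook_launcher
#
#   def training_function(learning_rate):
#       print(f"training with learning_rate={learning_rate}")
#
#   notebook_launcher(training_function, args=(1e-3,), num_processes=1)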
| 313 |
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import Speech2TextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_sentencepiece_available():
import sentencepiece as sp
FR_CODE = 5
ES_CODE = 10
@require_sentencepiece
@require_tokenizers
class SpeechToTextTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Speech2TextTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        spm_model = sp.SentencePieceProcessor()
        spm_model.Load(SAMPLE_VOCAB)
        vocab = ["<s>", "<pad>", "</s>", "<unk>"]

        vocab += [spm_model.IdToPiece(id_) for id_ in range(len(spm_model))]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_VOCAB, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1001)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1001)
    def test_full_tokenizer(self):
        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [289, 50, 14, 174, 386],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
        )
@slow
    def test_tokenizer_integration(self):
__lowercase = {'input_ids': [[37_91, 7_97, 31, 11, 64, 7_97, 31, 24_29, 4_33, 12, 11_76, 12, 20, 7_86, 9_15, 1_42, 24_13, 2_40, 37, 32_38, 7_97, 31, 11, 35, 93, 9_15, 1_42, 24_13, 2_40, 37, 55_40, 5_67, 12_76, 93, 37, 6_10, 40, 62, 4_55, 6_57, 10_42, 1_23, 7_80, 1_77, 37, 3_09, 2_41, 12_98, 5_14, 20, 2_92, 27_37, 1_14, 24_69, 2_41, 85, 64, 3_02, 5_48, 5_28, 4_23, 4, 5_09, 4_06, 4_23, 37, 6_01, 4, 7_77, 3_02, 5_48, 5_28, 4_23, 2_84, 4, 33_88, 5_11, 4_59, 4, 35_55, 40, 3_21, 3_02, 7_05, 4, 33_88, 5_11, 5_83, 3_26, 5, 5, 5, 62, 33_10, 5_60, 1_77, 26_80, 2_17, 15_08, 32, 31, 8_53, 4_18, 64, 5_83, 5_11, 16_05, 62, 35, 93, 5_60, 1_77, 26_80, 2_17, 15_08, 15_21, 64, 5_83, 5_11, 5_19, 62, 20, 15_15, 7_64, 20, 1_49, 2_61, 56_25, 79_72, 20, 55_40, 5_67, 12_76, 93, 39_25, 16_75, 11, 15, 8_02, 79_72, 5_76, 2_17, 15_08, 11, 35, 93, 12_53, 24_41, 15, 2_89, 6_52, 31, 4_16, 3_21, 38_42, 1_15, 40, 9_11, 8, 4_76, 6_19, 4, 3_80, 1_42, 4_23, 3_35, 2_40, 35, 93, 2_64, 8, 11, 3_35, 5_69, 4_20, 1_63, 5, 2], [2_60, 5_48, 5_28, 4_23, 20, 4_51, 20, 26_81, 11_53, 34_34, 20, 55_40, 37, 5_67, 1_26, 12_53, 24_41, 33_76, 4_49, 2_10, 4_31, 15_63, 1_77, 7_67, 55_40, 11, 12_03, 4_72, 11, 29_53, 6_85, 2_85, 3_64, 7_06, 11_53, 20, 67_99, 20, 28_69, 20, 44_64, 1_26, 40, 24_29, 20, 10_40, 8_66, 26_64, 4_18, 20, 3_18, 20, 17_26, 1_86, 20, 2_65, 5_22, 35, 93, 21_91, 46_34, 20, 10_40, 12, 67_99, 15, 2_28, 23_56, 1_42, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_75, 26_66, 6_84, 15_82, 11_76, 12, 6_27, 1_49, 6_19, 20, 49_02, 5_63, 11, 20, 1_49, 2_61, 34_20, 23_56, 1_74, 1_42, 47_14, 1_31, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__lowercase,
            model_name="facebook/s2t-small-mustc-en-de-st",
            revision="a14f04cf0776c02f62a8cb800cf7909e15ea23ad",
        )
@require_sentencepiece
class SpeechToTextTokenizerMultilinguialTest(unittest.TestCase):
    checkpoint_name = "valhalla/s2t_mustc_multilinguial_medium"

    french_text = "C'est trop cool"
    spanish_text = "Esto es genial"
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: Speech2TextTokenizer = Speech2TextTokenizer.from_pretrained(cls.checkpoint_name)
        return cls
    def check_language_codes(self):
        self.assertEqual(self.tokenizer.lang_code_to_id["pt"], 4)
        self.assertEqual(self.tokenizer.lang_code_to_id["ru"], 6)
        self.assertEqual(self.tokenizer.lang_code_to_id["it"], 9)
        self.assertEqual(self.tokenizer.lang_code_to_id["de"], 11)
    def test_vocab_size(self):
        self.assertEqual(self.tokenizer.vocab_size, 10_000)
    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(ES_CODE, self.tokenizer.all_special_ids)
        generated_ids = [ES_CODE, 4, 1601, 47, 7647, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_spanish = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_spanish)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_tokenizer_adds_special_tokens(self):
        self.tokenizer.tgt_lang = "fr"
        encoded = self.tokenizer(self.french_text).input_ids
        self.assertEqual(encoded[0], FR_CODE)
        self.assertEqual(encoded[-1], self.tokenizer.eos_token_id)
    def test_tgt_lang_setter(self):
        self.tokenizer.tgt_lang = "fr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE])

        self.tokenizer.tgt_lang = "es"
        self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE])
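

# Running these tests: pytest is the intended runner; the path below is the
# conventional location in the transformers repo (an assumption for this dump):
#
#   pytest tests/models/speech_to_text/test_tokenization_speech_to_text.py -q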
| 325 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")

        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
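

# Hedged usage sketch (run outside this module; downloads the Donut checkpoint
# on first use, and the image path/question are placeholders):
#
#   from PIL import Image
#   from transformers.tools import DocumentQuestionAnsweringTool
#
#   tool = DocumentQuestionAnsweringTool()
#   answer = tool(Image.open("invoice.png"), "What is the total amount?")
#   print(answer)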
| 259 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""",
}
class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_2d_position_embeddings=1024,
        coordinate_size=128,
        shape_size=128,
        has_relative_attention_bias=True,
        rel_pos_bins=32,
        max_rel_pos=128,
        rel_2d_pos_bins=64,
        max_rel_2d_pos=256,
        has_spatial_attention_bias=True,
        text_embed=True,
        visual_embed=True,
        input_size=224,
        num_channels=3,
        patch_size=16,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The order of inputs differs between question answering / sequence classification and the other tasks
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        setattr(processor.image_processor, "apply_ocr", False)

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size

        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)

        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )

        return inputs
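

# Hedged sketch of generating dummy inputs for an ONNX export (run outside this
# module; the checkpoint name and import path are assumptions):
#
#   from transformers import LayoutLMv3Processor
#   from transformers.models.layoutlmv3.configuration_layoutlmv3 import LayoutLMv3Config, LayoutLMv3OnnxConfig
#
#   onnx_config = LayoutLMv3OnnxConfig(LayoutLMv3Config(), task="question-answering")
#   processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
#   dummy = onnx_config.generate_dummy_inputs(processor, framework=None)
#   print(sorted(dummy.keys()))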
| 325 | 0 |
"""simple docstring"""
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")

    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]

    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)

    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0

    # loop till the total weight do not reach max limit e.g. 15 kg and till i<length
    while limit <= max_weight and i < length:
        # flag value for encountered greatest element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1

        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the given weight 1 ===
            # weight[index]/weight[index]
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than limit, therefore take the
            # required number of remaining kgs and calculate profit for it.
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain
if __name__ == "__main__":
print(
'Input profits, weights, and then max_weight (all positive ints) separated by '
'spaces.'
)
    profit = [int(x) for x in input("Input profits separated by spaces: ").split()]
    weight = [int(x) for x in input("Input weights separated by spaces: ").split()]
    max_weight = int(input("Max weight allowed: "))
# Function Call
calc_profit(profit, weight, max_weight)
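
    # Deterministic cross-check sketch (values chosen for illustration, not part
    # of the original script): profits [10, 9, 8] with weights [5, 4, 3] and a
    # 10 kg limit take items 3 and 2 fully plus 3/5 of item 1, so the gain is
    # 8 + 9 + (3 / 5) * 10 = 23.0.
    assert abs(calc_profit([10, 9, 8], [5, 4, 3], 10) - 23.0) < 1e-9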
| 332 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
    ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=kernel_size // 2,
            groups=groups,
            bias=False,
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetEmbeddings(nn.Module):
    """RegNet embeddings (stem): a single aggressive convolution."""

    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act
        )
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        hidden_state = self.embedder(pixel_values)
        return hidden_state


class RegNetShortCut(nn.Module):
    """Projects residual features to the correct size and, if needed, downsamples the input with ``stride=2``."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state


class RegNetSELayer(nn.Module):
    """Squeeze-and-Excitation layer: channel attention from pooled features."""

    def __init__(self, in_channels: int, reduced_channels: int):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced_channels, in_channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, hidden_state):
        pooled = self.pooler(hidden_state)
        attention = self.attention(pooled)
        hidden_state = hidden_state * attention
        return hidden_state


class RegNetXLayer(nn.Module):
    """RegNet's layer composed of three convolutions, same as a ResNet bottleneck layer with reduction = 1."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetYLayer(nn.Module):
    """RegNet's Y layer: an X layer with Squeeze and Excitation."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetStage(nn.Module):
    """A RegNet stage composed of stacked layers."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride),
            *[layer(config, out_channels, out_channels) for _ in range(depth - 1)],
        )

    def forward(self, hidden_state):
        hidden_state = self.layers(hidden_state)
        return hidden_state


class RegNetEncoder(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)

            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)


class RegNetPreTrainedModel(PreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, RegNetModel):
            module.gradient_checkpointing = value
REGNET_START_DOCSTRING = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
REGNET_INPUTS_DOCSTRING = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]

        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
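

# Hedged sketch of a randomly initialised forward pass (run outside this
# module, since the file itself uses package-relative imports):
#
#   import torch
#   from transformers import RegNetConfig, RegNetModel
#
#   model = RegNetModel(RegNetConfig())
#   with torch.no_grad():
#       out = model(torch.randn(1, 3, 224, 224))
#   print(out.last_hidden_state.shape)  # (1, hidden_sizes[-1], 7, 7) for 224x224 inputs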
| 325 | 0 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = SwinConfig(
        embed_dim=embed_dim,
        depths=depths,
        num_heads=num_heads,
        window_size=window_size,
        out_features=["stage1", "stage2", "stage3", "stage4"],
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("backbone.patch_embed.projection.weight", "backbone.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.projection.bias", "backbone.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "backbone.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "backbone.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.norm1.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.norm1.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', F'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', F'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.norm2.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.norm2.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((F'''backbone.stages.{i}.downsample.reduction.weight''', F'''backbone.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((F'''backbone.stages.{i}.downsample.norm.weight''', F'''backbone.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((F'''backbone.stages.{i}.downsample.norm.bias''', F'''backbone.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((F'''backbone.norm{i}.weight''', F'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((F'''backbone.norm{i}.bias''', F'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def reverse_correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x


def reverse_correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    """Convert an mmsegmentation UperNet + Swin checkpoint into the HF UperNet format."""
    model_name_to_url = {
        "upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
        "upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
        "upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
        "upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]
    for name, param in state_dict.items():
        print(name, param.shape)
    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()
    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)
    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)
    model.load_state_dict(state_dict)
    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits
    print(logits.shape)
    print("First values of logits:", logits[0, 0, :3, :3])
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]])
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]])
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]])
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]])
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="upernet-swin-tiny",
type=str,
choices=[f'''upernet-swin-{size}''' for size in ["tiny", "small", "base", "large"]],
help="Name of the Swin + UperNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
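# Illustrative CLI invocation of this conversion script. The script filename and
# the local output path below are hypothetical placeholders, not values taken
# from the original repository:
#
#   python convert_upernet_swin.py \
#       --model_name upernet-swin-tiny \
#       --pytorch_dump_folder_path ./upernet-swin-tiny \
#       --push_to_hub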
| 158 |
from __future__ import annotations
def min_path_sum(matrix: list[list[int]]) -> int:
    """Return the minimal path cost from top-left to bottom-right, moving only right or down."""
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]
    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]
    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])
    return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
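    # Hedged usage sketch (illustrative grid): note that min_path_sum mutates
    # its argument in place.
    example_grid = [[1, 2, 3], [4, 2, 5], [7, 2, 3]]
    assert min_path_sum(example_grid) == 10  # path 1 -> 2 -> 2 -> 2 -> 3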
| 325 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_a = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
_a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 17 |
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
SCREAMING_SNAKE_CASE__ = get_logger(__name__)
class A__ ( enum.Enum ):
lowerCAmelCase__ : Dict = "all_checks"
lowerCAmelCase__ : List[Any] = "basic_checks"
lowerCAmelCase__ : Dict = "no_checks"
class A__ ( lowerCAmelCase__ ):
pass
class A__ ( lowerCAmelCase__ ):
pass
class A__ ( lowerCAmelCase__ ):
pass
class A__ ( lowerCAmelCase__ ):
pass
def verify_checksums(expected_checksums, recorded_checksums, verification_name=None):
    if expected_checksums is None:
        logger.info('Unable to verify checksums.')
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = ' for ' + verification_name if verification_name is not None else ''
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"""Checksums didn't match{for_verification_name}:\n"""
            f"""{bad_urls}\n"""
            'Set `verification_mode=\'no_checks\'` to skip checksums verification and ignore this error' )
    logger.info('All the checksums matched successfully' + for_verification_name )
class A__ ( lowerCAmelCase__ ):
pass
class A__ ( lowerCAmelCase__ ):
pass
class A__ ( lowerCAmelCase__ ):
pass
class A__ ( lowerCAmelCase__ ):
pass
def verify_splits(expected_splits, recorded_splits):
    if expected_splits is None:
        logger.info('Unable to verify splits sizes.')
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {'expected': expected_splits[name], 'recorded': recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info('All the splits matched successfully.')
def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    if record_checksum:
        m = sha256()
        with open(path, 'rb') as f:
            # hash the file in 1 MiB chunks to bound memory use
            for chunk in iter(lambda: f.read(1 << 20), b''):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}
def is_small_dataset(dataset_size: Optional[int]) -> bool:
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
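if __name__ == "__main__":
    # Hedged usage sketch of verify_checksums: the URL and checksum values are
    # made-up placeholders, not real dataset metadata.
    _expected = {"https://example.com/a.txt": {"num_bytes": 3, "checksum": "abc"}}
    _recorded = {"https://example.com/a.txt": {"num_bytes": 3, "checksum": "abc"}}
    verify_checksums(_expected, _recorded)  # matching metadata -> logs success, raises nothing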
| 325 | 0 |
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class RealmRetrieverTest(TestCase):
    """Tests for RealmRetriever block-record retrieval."""
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = tempfile.mkdtemp()
lowercase = 5
# Realm tok
lowercase = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'test',
'question',
'this',
'is',
'the',
'first',
'second',
'third',
'fourth',
'fifth',
'record',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
lowercase = os.path.join(self.tmpdirname , 'realm_tokenizer' )
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
lowercase = os.path.join(_UpperCAmelCase , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
lowercase = os.path.join(self.tmpdirname , 'realm_block_records' )
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self ):
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'realm_tokenizer' ) )
def SCREAMING_SNAKE_CASE__ ( self ):
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = RealmConfig(num_block_records=self.num_block_records )
return config
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = Dataset.from_dict(
{
'id': ['0', '1'],
'question': ['foo', 'bar'],
'answers': [['Foo', 'Bar'], ['Bar']],
} )
return dataset
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = np.array(
[
B'This is the first record',
B'This is the second record',
B'This is the third record',
B'This is the fourth record',
B'This is the fifth record',
B'This is a longer longer longer record',
] , dtype=_UpperCAmelCase , )
return block_records
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.get_config()
lowercase = self.get_dummy_retriever()
lowercase = retriever.tokenizer
lowercase = np.array([0, 3] , dtype='long' )
lowercase = tokenizer(['Test question'] ).input_ids
lowercase = tokenizer(
['the fourth'] , add_special_tokens=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , ).input_ids
lowercase = config.reader_seq_len
lowercase , lowercase , lowercase , lowercase = retriever(
_UpperCAmelCase , _UpperCAmelCase , answer_ids=_UpperCAmelCase , max_length=_UpperCAmelCase , return_tensors='np' )
self.assertEqual(len(_UpperCAmelCase ) , 2 )
self.assertEqual(len(_UpperCAmelCase ) , 2 )
self.assertEqual(len(_UpperCAmelCase ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'first', 'record', '[SEP]'] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'fourth', 'record', '[SEP]'] , )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.get_config()
lowercase = self.get_dummy_retriever()
lowercase = retriever.tokenizer
lowercase = np.array([0, 3, 5] , dtype='long' )
lowercase = tokenizer(['Test question'] ).input_ids
lowercase = tokenizer(
['the fourth', 'longer longer'] , add_special_tokens=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , ).input_ids
lowercase = config.reader_seq_len
lowercase , lowercase , lowercase , lowercase = retriever(
_UpperCAmelCase , _UpperCAmelCase , answer_ids=_UpperCAmelCase , max_length=_UpperCAmelCase , return_tensors='np' )
self.assertEqual([False, True, True] , _UpperCAmelCase )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , _UpperCAmelCase )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , 'realm_block_records' ) )
# Test local path
lowercase = retriever.from_pretrained(os.path.join(self.tmpdirname , 'realm_block_records' ) )
self.assertEqual(retriever.block_records[0] , B'This is the first record' )
# Test mocked remote path
with patch('transformers.models.realm.retrieval_realm.hf_hub_download' ) as mock_hf_hub_download:
lowercase = os.path.join(
os.path.join(self.tmpdirname , 'realm_block_records' ) , _REALM_BLOCK_RECORDS_FILENAME )
lowercase = RealmRetriever.from_pretrained('google/realm-cc-news-pretrained-openqa' )
self.assertEqual(retriever.block_records[0] , B'This is the first record' )
| 195 |
import math
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False
    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    """Walk upward from ``factor * value`` until a prime is found; a prime starting point yields the next prime above it."""
    value = factor * value
    first_value_val = value
    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
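if __name__ == "__main__":
    # Hedged usage sketch with illustrative values: next_prime walks upward, so
    # a prime input yields the next prime strictly above it.
    assert next_prime(14) == 17
    assert next_prime(17) == 19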
| 325 | 0 |
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('''ignore''', category=UserWarning, module='''torch.optim.lr_scheduler''')
class AcceleratedScheduler:
    """Wraps a learning-rate scheduler so it only steps when the optimizers actually stepped."""

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return
        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return
        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, 'total_steps'):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
| 282 |
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class A__ ( unittest.TestCase ):
def a__ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
__lowercase = tempfile.mkdtemp()
__lowercase = SamImageProcessor()
__lowercase = SamProcessor(_UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
def a__ ( self : int , **_UpperCAmelCase : Optional[Any] ) -> Tuple:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase ).image_processor
def a__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def a__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
__lowercase = [Image.fromarray(np.moveaxis(_UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def a__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__lowercase = self.get_image_processor(do_normalize=_UpperCAmelCase , padding_value=1.0 )
__lowercase = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=_UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _UpperCAmelCase )
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
__lowercase = self.get_image_processor()
__lowercase = SamProcessor(image_processor=_UpperCAmelCase )
__lowercase = self.prepare_image_inputs()
__lowercase = image_processor(_UpperCAmelCase , return_tensors='np' )
__lowercase = processor(images=_UpperCAmelCase , return_tensors='np' )
input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor
        input_feat_extract.pop('reshaped_input_sizes' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_torch
def a__ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.get_image_processor()
__lowercase = SamProcessor(image_processor=_UpperCAmelCase )
__lowercase = [torch.ones((1, 3, 5, 5) )]
__lowercase = [[17_64, 26_46]]
__lowercase = [[6_83, 10_24]]
__lowercase = processor.post_process_masks(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
__lowercase = processor.post_process_masks(
_UpperCAmelCase , torch.tensor(_UpperCAmelCase ) , torch.tensor(_UpperCAmelCase ) )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
# should also work with np
__lowercase = [np.ones((1, 3, 5, 5) )]
__lowercase = processor.post_process_masks(_UpperCAmelCase , np.array(_UpperCAmelCase ) , np.array(_UpperCAmelCase ) )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
__lowercase = [[1, 0], [0, 1]]
with self.assertRaises(_UpperCAmelCase ):
__lowercase = processor.post_process_masks(_UpperCAmelCase , np.array(_UpperCAmelCase ) , np.array(_UpperCAmelCase ) )
@require_vision
@require_tf
class A__ ( unittest.TestCase ):
def a__ ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase = tempfile.mkdtemp()
__lowercase = SamImageProcessor()
__lowercase = SamProcessor(_UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
def a__ ( self : str , **_UpperCAmelCase : Tuple ) -> Tuple:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase ).image_processor
def a__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def a__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
__lowercase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
__lowercase = [Image.fromarray(np.moveaxis(_UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def a__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__lowercase = self.get_image_processor(do_normalize=_UpperCAmelCase , padding_value=1.0 )
__lowercase = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=_UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _UpperCAmelCase )
def a__ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
__lowercase = self.get_image_processor()
__lowercase = SamProcessor(image_processor=_UpperCAmelCase )
__lowercase = self.prepare_image_inputs()
__lowercase = image_processor(_UpperCAmelCase , return_tensors='np' )
__lowercase = processor(images=_UpperCAmelCase , return_tensors='np' )
input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop('reshaped_input_sizes' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_tf
def a__ ( self : Dict ) -> List[Any]:
"""simple docstring"""
__lowercase = self.get_image_processor()
__lowercase = SamProcessor(image_processor=_UpperCAmelCase )
__lowercase = [tf.ones((1, 3, 5, 5) )]
__lowercase = [[17_64, 26_46]]
__lowercase = [[6_83, 10_24]]
__lowercase = processor.post_process_masks(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , return_tensors='tf' )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
__lowercase = processor.post_process_masks(
_UpperCAmelCase , tf.convert_to_tensor(_UpperCAmelCase ) , tf.convert_to_tensor(_UpperCAmelCase ) , return_tensors='tf' , )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
# should also work with np
__lowercase = [np.ones((1, 3, 5, 5) )]
__lowercase = processor.post_process_masks(
_UpperCAmelCase , np.array(_UpperCAmelCase ) , np.array(_UpperCAmelCase ) , return_tensors='tf' )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
__lowercase = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
__lowercase = processor.post_process_masks(
_UpperCAmelCase , np.array(_UpperCAmelCase ) , np.array(_UpperCAmelCase ) , return_tensors='tf' )
@require_vision
@require_torchvision
class A__ ( unittest.TestCase ):
def a__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = tempfile.mkdtemp()
__lowercase = SamImageProcessor()
__lowercase = SamProcessor(_UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
def a__ ( self : Dict , **_UpperCAmelCase : int ) -> Optional[Any]:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase ).image_processor
def a__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def a__ ( self : List[str] ) -> int:
"""simple docstring"""
__lowercase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
__lowercase = [Image.fromarray(np.moveaxis(_UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
__lowercase = self.get_image_processor()
__lowercase = SamProcessor(image_processor=_UpperCAmelCase )
__lowercase = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa )
__lowercase = [tf.convert_to_tensor(_UpperCAmelCase )]
__lowercase = [torch.tensor(_UpperCAmelCase )]
__lowercase = [[17_64, 26_46]]
__lowercase = [[6_83, 10_24]]
__lowercase = processor.post_process_masks(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , return_tensors='tf' )
__lowercase = processor.post_process_masks(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , return_tensors='pt' )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def a__ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__lowercase = self.get_image_processor()
__lowercase = SamProcessor(image_processor=_UpperCAmelCase )
__lowercase = self.prepare_image_inputs()
__lowercase = image_processor(_UpperCAmelCase , return_tensors='pt' )['pixel_values'].numpy()
__lowercase = processor(images=_UpperCAmelCase , return_tensors='pt' )['pixel_values'].numpy()
__lowercase = image_processor(_UpperCAmelCase , return_tensors='tf' )['pixel_values'].numpy()
__lowercase = processor(images=_UpperCAmelCase , return_tensors='tf' )['pixel_values'].numpy()
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) )
| 325 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'''edbeeching/decision-transformer-gym-hopper-medium''': (
'''https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'''
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class DecisionTransformerConfig(PretrainedConfig):
    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
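# Hedged usage sketch: instantiate the config for a toy continuous-control task.
# The dimensions below are illustrative placeholders, not tuned values.
if __name__ == "__main__":
    demo_config = DecisionTransformerConfig(state_dim=11, act_dim=3, hidden_size=128)
    print(demo_config.n_layer, demo_config.n_head)  # -> 3 1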
| 340 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bartpho"] = ["""BartphoTokenizer"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 325 | 0 |
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
'kwargs, expected' , [
({'num_shards': 0, 'max_num_jobs': 1}, []),
({'num_shards': 10, 'max_num_jobs': 1}, [range(10 )]),
        ({'num_shards': 10, 'max_num_jobs': 10}, [range(i , i + 1 ) for i in range(10 )]),
({'num_shards': 1, 'max_num_jobs': 10}, [range(1 )]),
({'num_shards': 10, 'max_num_jobs': 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({'num_shards': 3, 'max_num_jobs': 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected
@pytest.mark.parametrize(
'gen_kwargs, max_num_jobs, expected' , [
({'foo': 0}, 10, [{'foo': 0}]),
({'shards': [0, 1, 2, 3]}, 1, [{'shards': [0, 1, 2, 3]}]),
({'shards': [0, 1, 2, 3]}, 4, [{'shards': [0]}, {'shards': [1]}, {'shards': [2]}, {'shards': [3]}]),
({'shards': [0, 1]}, 4, [{'shards': [0]}, {'shards': [1]}]),
({'shards': [0, 1, 2, 3]}, 2, [{'shards': [0, 1]}, {'shards': [2, 3]}]),
] , )
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected
@pytest.mark.parametrize(
'gen_kwargs, expected' , [
({'foo': 0}, 1),
({'shards': [0]}, 1),
({'shards': [0, 1, 2, 3]}, 4),
({'shards': [0, 1, 2, 3], 'foo': 0}, 4),
({'shards': [0, 1, 2, 3], 'other': (0, 1)}, 4),
({'shards': [0, 1, 2, 3], 'shards2': [0, 1]}, RuntimeError),
] , )
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
| 327 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""transfo-xl-wt103""": """https://huggingface.co/transfo-xl-wt103/resolve/main/config.json""",
}
class TransfoXLConfig(PretrainedConfig):
    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"""The model {self.model_type} is one of the few models that has no sequence length limit.""")
| 325 | 0 |
import math
def is_prime(number: int) -> bool:
    """Trial-division primality test for non-negative integers."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False
    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    """Walk upward from ``factor * value`` until a prime is found; a prime starting point yields the next prime above it."""
    value = factor * value
    first_value_val = value
    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
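if __name__ == "__main__":
    # Hedged sanity checks (illustrative values only).
    assert is_prime(2) and is_prime(97)
    assert not is_prime(1) and not is_prime(100)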
| 279 |
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
SCREAMING_SNAKE_CASE__ = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""")
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Tuple:
__lowercase = []
__lowercase = fairseq_model.state_dict()
__lowercase = hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
__lowercase = None
for name, value in fairseq_dict.items():
__lowercase = False
if "conv_layers" in name:
load_conv_layer(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == 'group' , )
__lowercase = True
elif name.split('.' )[0] == "proj":
__lowercase = fairseq_model.proj
__lowercase = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
__lowercase = True
if "*" in mapped_key:
__lowercase = name.split(SCREAMING_SNAKE_CASE )[0].split('.' )[-2]
__lowercase = mapped_key.replace('*' , SCREAMING_SNAKE_CASE )
if "weight_g" in name:
__lowercase = 'weight_g'
elif "weight_v" in name:
__lowercase = 'weight_v'
elif "bias" in name:
__lowercase = 'bias'
elif "weight" in name:
__lowercase = 'weight'
else:
__lowercase = None
set_recursively(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
continue
if not is_used:
unused_weights.append(SCREAMING_SNAKE_CASE )
logger.warning(F"""Unused weights: {unused_weights}""" )
return proj_weight
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[int] ) -> Optional[Any]:
__lowercase = full_name.split('conv_layers.' )[-1]
__lowercase = name.split('.' )
__lowercase = int(items[0] )
__lowercase = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
__lowercase = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
__lowercase = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
__lowercase = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
__lowercase = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(SCREAMING_SNAKE_CASE )
def make_linear_from_emb(emb):
    """Build a linear layer that shares its weights with an embedding matrix."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path):
    """Build a token -> id mapping from a fairseq dict file, reserving the first four special tokens."""
    with open(dict_path, 'r', encoding='utf-8') as f:
        lines = f.readlines()
        words = [line.split(' ')[0] for line in lines]
        num_words = len(words)
        vocab_dict = {
            '<s>': 0,
            '<pad>': 1,
            '</s>': 2,
            '<unk>': 3,
        }
        vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[int] , ) -> List[Any]:
__lowercase = WavaVecaConfig.from_pretrained(SCREAMING_SNAKE_CASE )
__lowercase = SpeechaTextaConfig.from_pretrained(
SCREAMING_SNAKE_CASE , vocab_size=SCREAMING_SNAKE_CASE , decoder_layers=SCREAMING_SNAKE_CASE , do_stable_layer_norm=SCREAMING_SNAKE_CASE )
__lowercase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , )
__lowercase , __lowercase , __lowercase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
__lowercase = model[0].eval()
# set weights for wav2vec2 encoder
__lowercase = WavaVecaModel(SCREAMING_SNAKE_CASE )
__lowercase = recursively_load_weights_wavaveca(model.encoder , SCREAMING_SNAKE_CASE )
__lowercase = SpeechaTextaForCausalLM(SCREAMING_SNAKE_CASE )
__lowercase , __lowercase = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=SCREAMING_SNAKE_CASE )
# set output linear layer
unexpected_keys.remove('embed_out' )
__lowercase = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(F"""The following keys are missing when loading the decoder weights: {missing_keys}""" )
logger.warning(F"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" )
__lowercase = SpeechEncoderDecoderModel(encoder=SCREAMING_SNAKE_CASE , decoder=SCREAMING_SNAKE_CASE )
__lowercase = False
# add projection layer
__lowercase = nn.Parameter(projection_layer.weight )
__lowercase = nn.Parameter(projection_layer.bias )
__lowercase = create_vocab_dict(SCREAMING_SNAKE_CASE )
with open(os.path.join(SCREAMING_SNAKE_CASE , 'vocab.json' ) , 'w' ) as fp:
json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowercase = SpeechaTextaTokenizer(os.path.join(SCREAMING_SNAKE_CASE , 'vocab.json' ) )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE )
__lowercase = hf_wavavec.config.to_dict()
__lowercase = tokenizer.pad_token_id
__lowercase = tokenizer.bos_token_id
__lowercase = tokenizer.eos_token_id
__lowercase = 'speech_to_text_2'
__lowercase = 'wav2vec2'
__lowercase = SpeechEncoderDecoderConfig.from_dict(SCREAMING_SNAKE_CASE )
hf_wavavec.save_pretrained(SCREAMING_SNAKE_CASE )
feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-large-lv60""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/s2t-small-mustc-en-fr-st""",
type=str,
help="""Path to hf decoder s2t checkpoint config""",
)
parser.add_argument("""--vocab_size""", default=1_0224, type=int, help="""Vocab size of decoder""")
parser.add_argument("""--num_decoder_layers""", default=7, type=int, help="""Number of decoder layers""")
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
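# Illustrative invocation of this conversion script; the script filename and the
# local checkpoint/dict paths are hypothetical placeholders for fairseq
# artifacts, not files from the repository:
#
#   python convert_wav2vec2_seq2seq.py \
#       --checkpoint_path ./checkpoint_best.pt \
#       --dict_path ./dict.txt \
#       --pytorch_dump_folder_path ./wav2vec2-2-speech2text2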
| 325 | 0 |
"""simple docstring"""
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_safs = importlib.util.find_spec("s3fs") is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
COMPRESSION_FILESYSTEMS = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F'''A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.''')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str) -> str:
    """Strip the protocol prefix (e.g. "s3://") from a dataset path, if present."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs) -> bool:
    """Return True when ``fs`` points to a non-local (remote) filesystem."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs, src, dst):
    """Move ``src`` to ``dst`` on ``fs``, using an efficient local move when possible."""
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock():
    """Clear fsspec's event-loop references so forked processes don't hang on a stale lock."""
    if hasattr(fsspec.asyn, """reset_lock"""):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
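if __name__ == "__main__":
    # Hedged usage sketch with an illustrative URI.
    assert extract_path_from_uri("s3://bucket/dataset") == "bucket/dataset"
    assert extract_path_from_uri("relative/path") == "relative/path"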
| 289 |
def binomial_coefficient(n, r):
    """Compute C(n, r) with a rolling one-dimensional Pascal row."""
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5))
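# Hedged cross-check with illustrative values: the rolling-row computation above
# should agree with the standard library (Python >= 3.8).
import math

assert binomial_coefficient(n=10, r=5) == math.comb(10, 5) == 252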
| 325 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : Any = logging.get_logger(__name__)
a__ : int = {
'''facebook/timesformer''': '''https://huggingface.co/facebook/timesformer/resolve/main/config.json''',
}
class TimesformerConfig(PretrainedConfig):
    """Configuration class for the TimeSformer video transformer."""

    model_type = "timesformer"

    def __init__(
        self,
        image_size=224,
        patch_size=16,
        num_channels=3,
        num_frames=8,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        qkv_bias=True,
        attention_type="divided_space_time",
        drop_path_rate=0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
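# Hedged usage sketch: a reduced config for quick experiments; the sizes are
# illustrative, not those of a released checkpoint.
if __name__ == "__main__":
    demo_config = TimesformerConfig(num_frames=16, hidden_size=384, num_hidden_layers=6)
    print(demo_config.attention_type)  # -> divided_space_time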
| 313 |
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["vqvae"]
def __init__( self : int , _UpperCAmelCase : AutoencoderKL , _UpperCAmelCase : UNetaDConditionModel , _UpperCAmelCase : Mel , _UpperCAmelCase : Union[DDIMScheduler, DDPMScheduler] , ) -> str:
"""simple docstring"""
super().__init__()
self.register_modules(unet=_UpperCAmelCase , scheduler=_UpperCAmelCase , mel=_UpperCAmelCase , vqvae=_UpperCAmelCase )
def a__ ( self : Tuple ) -> int:
"""simple docstring"""
return 50 if isinstance(self.scheduler , _UpperCAmelCase ) else 10_00
@torch.no_grad()
def __call__( self : str , _UpperCAmelCase : int = 1 , _UpperCAmelCase : str = None , _UpperCAmelCase : np.ndarray = None , _UpperCAmelCase : int = 0 , _UpperCAmelCase : int = 0 , _UpperCAmelCase : int = None , _UpperCAmelCase : torch.Generator = None , _UpperCAmelCase : float = 0 , _UpperCAmelCase : float = 0 , _UpperCAmelCase : torch.Generator = None , _UpperCAmelCase : float = 0 , _UpperCAmelCase : torch.Tensor = None , _UpperCAmelCase : torch.Tensor = None , _UpperCAmelCase : str=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
"""simple docstring"""
__lowercase = steps or self.get_default_steps()
self.scheduler.set_timesteps(_UpperCAmelCase )
__lowercase = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
__lowercase = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
__lowercase = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=_UpperCAmelCase , device=self.device , )
__lowercase = noise
__lowercase = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(_UpperCAmelCase , _UpperCAmelCase )
__lowercase = self.mel.audio_slice_to_image(_UpperCAmelCase )
__lowercase = np.frombuffer(input_image.tobytes() , dtype='uint8' ).reshape(
(input_image.height, input_image.width) )
__lowercase = (input_image / 2_55) * 2 - 1
__lowercase = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
__lowercase = self.vqvae.encode(torch.unsqueeze(_UpperCAmelCase , 0 ) ).latent_dist.sample(
generator=_UpperCAmelCase )[0]
__lowercase = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
__lowercase = self.scheduler.add_noise(_UpperCAmelCase , _UpperCAmelCase , self.scheduler.timesteps[start_step - 1] )
__lowercase = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
__lowercase = int(mask_start_secs * pixels_per_second )
__lowercase = int(mask_end_secs * pixels_per_second )
__lowercase = self.scheduler.add_noise(_UpperCAmelCase , _UpperCAmelCase , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , _UpperCAmelCase ):
__lowercase = self.unet(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )['sample']
else:
__lowercase = self.unet(_UpperCAmelCase , _UpperCAmelCase )['sample']
if isinstance(self.scheduler , _UpperCAmelCase ):
__lowercase = self.scheduler.step(
model_output=_UpperCAmelCase , timestep=_UpperCAmelCase , sample=_UpperCAmelCase , eta=_UpperCAmelCase , generator=_UpperCAmelCase , )['prev_sample']
else:
__lowercase = self.scheduler.step(
model_output=_UpperCAmelCase , timestep=_UpperCAmelCase , sample=_UpperCAmelCase , generator=_UpperCAmelCase , )['prev_sample']
if mask is not None:
if mask_start > 0:
__lowercase = mask[:, step, :, :mask_start]
if mask_end > 0:
__lowercase = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
__lowercase = 1 / self.vqvae.config.scaling_factor * images
__lowercase = self.vqvae.decode(_UpperCAmelCase )['sample']
__lowercase = (images / 2 + 0.5).clamp(0 , 1 )
__lowercase = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
__lowercase = (images * 2_55).round().astype('uint8' )
__lowercase = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(_UpperCAmelCase , mode='RGB' ).convert('L' ) for _ in images) )
__lowercase = [self.mel.image_to_audio(_UpperCAmelCase ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(_UpperCAmelCase )[:, np.newaxis, :] ) , **ImagePipelineOutput(_UpperCAmelCase ) )
@torch.no_grad()
def a__ ( self : Any , _UpperCAmelCase : List[Image.Image] , _UpperCAmelCase : int = 50 ) -> np.ndarray:
"""simple docstring"""
assert isinstance(self.scheduler , _UpperCAmelCase )
self.scheduler.set_timesteps(_UpperCAmelCase )
__lowercase = np.array(
[np.frombuffer(image.tobytes() , dtype='uint8' ).reshape((1, image.height, image.width) ) for image in images] )
__lowercase = (sample / 2_55) * 2 - 1
__lowercase = torch.Tensor(_UpperCAmelCase ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
__lowercase = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
__lowercase = self.scheduler.alphas_cumprod[t]
__lowercase = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
__lowercase = 1 - alpha_prod_t
__lowercase = self.unet(_UpperCAmelCase , _UpperCAmelCase )['sample']
__lowercase = (1 - alpha_prod_t_prev) ** 0.5 * model_output
__lowercase = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
__lowercase = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def a__ ( _UpperCAmelCase : torch.Tensor , _UpperCAmelCase : torch.Tensor , _UpperCAmelCase : float ) -> torch.Tensor:
"""simple docstring"""
__lowercase = acos(torch.dot(torch.flatten(_UpperCAmelCase ) , torch.flatten(_UpperCAmelCase ) ) / torch.norm(_UpperCAmelCase ) / torch.norm(_UpperCAmelCase ) )
return sin((1 - alpha) * theta ) * xa / sin(_UpperCAmelCase ) + sin(alpha * theta ) * xa / sin(_UpperCAmelCase )
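# A minimal usage sketch of the spherical interpolation ("slerp") helper above,
# lifted out as a free function with the same (xa, xb, alpha) contract. The
# standalone name is an assumption; the math is taken verbatim from the method.
# Slerp keeps interpolants near the norm shell of the endpoints, which is why it
# is preferred over linear mixing when blending diffusion noise latents.
from math import acos, sin

import torch


def slerp(xa: torch.Tensor, xb: torch.Tensor, alpha: float) -> torch.Tensor:
    theta = acos(torch.dot(torch.flatten(xa), torch.flatten(xb)) / torch.norm(xa) / torch.norm(xb))
    return sin((1 - alpha) * theta) * xa / sin(theta) + sin(alpha * theta) * xb / sin(theta)


noise_a, noise_b = torch.randn(1, 1, 256, 256), torch.randn(1, 1, 256, 256)
halfway = slerp(noise_a, noise_b, 0.5)  # equal-weight blend of the two noise samples
assert halfway.shape == noise_a.shape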
| 325 | 0 |
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class UpperCAmelCase_ ( lowerCAmelCase__ ):
"""simple docstring"""
UpperCamelCase_ : Tuple =""
    UpperCamelCase_ : List[str] ="hf-legacy" # "hf://" is reserved for hffs
def __init__( self , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ) -> List[Any]:
super().__init__(self , **_UpperCAmelCase )
UpperCamelCase :int = repo_info
UpperCamelCase :List[str] = token
UpperCamelCase :Tuple = None
def UpperCAmelCase ( self ) -> int:
if self.dir_cache is None:
UpperCamelCase :Tuple = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
UpperCamelCase :int = {
'''name''': hf_file.rfilename,
'''size''': None,
'''type''': '''file''',
}
self.dir_cache.update(
{
str(_UpperCAmelCase ): {'''name''': str(_UpperCAmelCase ), '''size''': None, '''type''': '''directory'''}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = "rb" , **SCREAMING_SNAKE_CASE_ , ) -> Tuple:
if not isinstance(self.repo_info , _UpperCAmelCase ):
raise NotImplementedError(F'''Open is only implemented for dataset repositories, but got {self.repo_info}''' )
UpperCamelCase :Tuple = hf_hub_url(self.repo_info.id , _UpperCAmelCase , revision=self.repo_info.sha )
return fsspec.open(
_UpperCAmelCase , mode=_UpperCAmelCase , headers=get_authentication_headers_for_url(_UpperCAmelCase , use_auth_token=self.token ) , client_kwargs={'''trust_env''': True} , ).open()
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Tuple:
self._get_dirs()
UpperCamelCase :List[str] = self._strip_protocol(_UpperCAmelCase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(_UpperCAmelCase )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False , **SCREAMING_SNAKE_CASE_ ) -> int:
self._get_dirs()
UpperCamelCase :Optional[int] = PurePosixPath(path.strip('''/''' ) )
UpperCamelCase :Optional[int] = {}
for p, f in self.dir_cache.items():
UpperCamelCase :int = PurePosixPath(p.strip('''/''' ) )
UpperCamelCase :Any = p.parent
if root == path:
UpperCamelCase :Tuple = f
UpperCamelCase :Union[str, Any] = list(paths.values() )
if detail:
return out
else:
return sorted(f['''name'''] for f in out )
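# A minimal driving sketch for the legacy filesystem above, assuming it survives
# de-obfuscation as `HfFileSystem` with the usual fsspec surface (`ls`, `open`).
# The repo id and file name below are illustrative only; `dataset_info` and its
# `.siblings` (which feed the directory cache) come from huggingface_hub.
from huggingface_hub import HfApi

info = HfApi().dataset_info("squad")
fs = HfFileSystem(repo_info=info)
print(fs.ls("", detail=False))  # top-level entries built from the sibling listing
with fs.open("README.md", mode="rb") as f:  # streamed via fsspec.open + hf_hub_url
    print(f.read(200))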
| 259 |
from __future__ import annotations
# This is the precision for this function, which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
SCREAMING_SNAKE_CASE__ = 10
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : list[int] , SCREAMING_SNAKE_CASE : int ) -> int:
    for i in range(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE + 1 ):  # right bound is inclusive
if array[i] == target:
return i
return -1
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : list[int] , SCREAMING_SNAKE_CASE : int ) -> int:
__lowercase = 0
    __lowercase = len(SCREAMING_SNAKE_CASE ) - 1  # inclusive right bound keeps every probe in range
while left <= right:
if right - left < precision:
return lin_search(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        # probe points must stay inside [left, right]; the one-third/two-third split
        # below does, whereas (left + right) // 3 + 1 drifts out of range as left grows
        __lowercase = left + (right - left) // 3
        __lowercase = right - (right - left) // 3
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
__lowercase = one_third - 1
elif array[two_third] < target:
__lowercase = two_third + 1
else:
__lowercase = one_third + 1
__lowercase = two_third - 1
else:
return -1
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : list[int] , SCREAMING_SNAKE_CASE : int ) -> int:
    if left <= right:  # a single-element window is still searchable
if right - left < precision:
return lin_search(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        __lowercase = left + (right - left) // 3
        __lowercase = right - (right - left) // 3
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
return rec_ternary_search(SCREAMING_SNAKE_CASE , one_third - 1 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
elif array[two_third] < target:
return rec_ternary_search(two_third + 1 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
return rec_ternary_search(one_third + 1 , two_third - 1 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE__ = input("""Enter numbers separated by comma:\n""").strip()
SCREAMING_SNAKE_CASE__ = [int(item.strip()) for item in user_input.split(""",""")]
assert collection == sorted(collection), F"List must be ordered.\n{collection}."
SCREAMING_SNAKE_CASE__ = int(input("""Enter the number to be found in the list:\n""").strip())
SCREAMING_SNAKE_CASE__ = ite_ternary_search(collection, target)
SCREAMING_SNAKE_CASE__ = rec_ternary_search(0, len(collection) - 1, collection, target)
if resulta != -1:
print(F'''Iterative search: {target} found at positions: {resulta}''')
print(F'''Recursive search: {target} found at positions: {resulta}''')
else:
print("""Not found""")
| 325 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
_lowercase : Union[str, Any] = logging.get_logger(__name__)
class _UpperCAmelCase ( lowerCAmelCase__ ):
def __init__( self : Optional[Any] , *_lowercase : Optional[Any] , **_lowercase : int ):
warnings.warn(
'''The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use BeitImageProcessor instead.''' , _UpperCAmelCase , )
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
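# Migration is a straight rename: the shim above only emits the warning and then
# defers to BeitImageProcessor, whose call surface is identical. The checkpoint id
# below is illustrative.
from transformers import BeitImageProcessor

image_processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")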
| 332 |
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict ) -> List[str]:
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class A__ ( nn.Module ):
def __init__( self : Any , _UpperCAmelCase : nn.Module , _UpperCAmelCase : int ) -> Optional[int]:
"""simple docstring"""
super().__init__()
__lowercase = module
__lowercase = nn.Sequential(
nn.Linear(module.in_features , _UpperCAmelCase , bias=_UpperCAmelCase ) , nn.Linear(_UpperCAmelCase , module.out_features , bias=_UpperCAmelCase ) , )
__lowercase = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=_UpperCAmelCase )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def a__ ( self : str , _UpperCAmelCase : List[str] , *_UpperCAmelCase : List[Any] , **_UpperCAmelCase : List[str] ) -> Optional[Any]:
"""simple docstring"""
return self.module(_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ) + self.adapter(_UpperCAmelCase )
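# Standalone sketch of the adapter wrapper above. `LoRALayer` is the name the
# 4-bit training test below uses when swapping it onto the attention projections;
# the forward pass returns base(x) + low-rank adapter(x).
import torch
import torch.nn as nn

_base = nn.Linear(64, 64)
_wrapped = LoRALayer(_base, rank=8)
_out = _wrapped(torch.randn(2, 64))
assert _out.shape == (2, 64)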
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class A__ ( unittest.TestCase ):
# We keep the constants inside the init function and model loading inside setUp function
    # We need to test on relatively large models (aka >1b parameters, otherwise the quantization may not work as expected)
    # Therefore here we use only bloom-1b7 to test our module
lowerCAmelCase__ : int = "bigscience/bloom-1b7"
# Constant values
lowerCAmelCase__ : Any = 2.109659552692574
lowerCAmelCase__ : str = "Hello my name is"
lowerCAmelCase__ : Any = set()
EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I" )
EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" )
EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" )
lowerCAmelCase__ : List[Any] = 10
def a__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
__lowercase = AutoTokenizer.from_pretrained(self.model_name )
class A__ ( lowerCAmelCase__ ):
def a__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
super().setUp()
# Models and tokenizer
__lowercase = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='auto' )
__lowercase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
def a__ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def a__ ( self : str ) -> int:
"""simple docstring"""
__lowercase = self.model_abit.config
self.assertTrue(hasattr(_UpperCAmelCase , 'quantization_config' ) )
__lowercase = config.to_dict()
__lowercase = config.to_diff_dict()
__lowercase = config.to_json_string()
def a__ ( self : Dict ) -> Tuple:
"""simple docstring"""
from bitsandbytes.nn import Paramsabit
__lowercase = self.model_fpaa.get_memory_footprint()
__lowercase = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
__lowercase = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(_UpperCAmelCase , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def a__ ( self : List[str] ) -> str:
"""simple docstring"""
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' )
__lowercase = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_UpperCAmelCase ) , self.EXPECTED_OUTPUTS )
def a__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase = BitsAndBytesConfig()
__lowercase = True
__lowercase = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=_UpperCAmelCase , device_map='auto' )
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' )
__lowercase = model_abit_from_config.generate(
input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_UpperCAmelCase ) , self.EXPECTED_OUTPUTS )
def a__ ( self : str ) -> List[str]:
"""simple docstring"""
with self.assertRaises(_UpperCAmelCase ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(_UpperCAmelCase )
def a__ ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = BitsAndBytesConfig()
with self.assertRaises(_UpperCAmelCase ):
__lowercase = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=_UpperCAmelCase , load_in_abit=_UpperCAmelCase , device_map='auto' , bnb_abit_quant_type='nf4' , )
def a__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
with self.assertRaises(_UpperCAmelCase ):
# Tries with `str`
self.model_abit.to('cpu' )
with self.assertRaises(_UpperCAmelCase ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(_UpperCAmelCase ):
# Tries with a `device`
self.model_abit.to(torch.device('cuda:0' ) )
with self.assertRaises(_UpperCAmelCase ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(_UpperCAmelCase ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' )
__lowercase = self.model_fpaa.to(torch.floataa )
__lowercase = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
__lowercase = self.model_fpaa.to('cpu' )
# Check this does not throw an error
__lowercase = self.model_fpaa.half()
# Check this does not throw an error
__lowercase = self.model_fpaa.float()
def a__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=_UpperCAmelCase , device_map='auto' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class A__ ( unittest.TestCase ):
@classmethod
def a__ ( cls : int ) -> Tuple:
"""simple docstring"""
__lowercase = 't5-small'
__lowercase = 'google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense
__lowercase = AutoTokenizer.from_pretrained(cls.model_name )
__lowercase = 'Translate in German: Hello, my dog is cute'
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
gc.collect()
torch.cuda.empty_cache()
def a__ ( self : int ) -> int:
"""simple docstring"""
from transformers import TaForConditionalGeneration
__lowercase = TaForConditionalGeneration._keep_in_fpaa_modules
__lowercase = None
# test with `t5-small`
__lowercase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__lowercase = model.generate(**_UpperCAmelCase )
# test with `flan-t5-small`
__lowercase = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__lowercase = model.generate(**_UpperCAmelCase )
__lowercase = modules
def a__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
__lowercase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__lowercase = model.generate(**_UpperCAmelCase )
# test with `flan-t5-small`
__lowercase = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__lowercase = model.generate(**_UpperCAmelCase )
class A__ ( lowerCAmelCase__ ):
def a__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
super().setUp()
# model_name
__lowercase = 'bigscience/bloom-560m'
__lowercase = 't5-small'
# Different types of model
__lowercase = AutoModel.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
# Sequence classification model
__lowercase = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
# CausalLM model
__lowercase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
# Seq2seq model
__lowercase = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
def a__ ( self : int ) -> List[str]:
"""simple docstring"""
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class A__ ( lowerCAmelCase__ ):
def a__ ( self : str ) -> str:
"""simple docstring"""
super().setUp()
def a__ ( self : Dict ) -> Any:
"""simple docstring"""
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def a__ ( self : Tuple ) -> int:
"""simple docstring"""
__lowercase = pipeline(
'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
__lowercase = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class A__ ( lowerCAmelCase__ ):
def a__ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
super().setUp()
def a__ ( self : List[Any] ) -> int:
"""simple docstring"""
__lowercase = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=_UpperCAmelCase , device_map='balanced' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' )
# Second real batch
__lowercase = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=_UpperCAmelCase ) , self.EXPECTED_OUTPUTS )
class A__ ( lowerCAmelCase__ ):
def a__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = 'facebook/opt-350m'
super().setUp()
def a__ ( self : Dict ) -> List[str]:
"""simple docstring"""
if version.parse(importlib.metadata.version('bitsandbytes' ) ) < version.parse('0.37.0' ):
return
# Step 1: freeze all parameters
__lowercase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
__lowercase = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
__lowercase = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(_UpperCAmelCase ) ):
__lowercase = LoRALayer(module.q_proj , rank=16 )
__lowercase = LoRALayer(module.k_proj , rank=16 )
__lowercase = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
__lowercase = self.tokenizer('Test batch ' , return_tensors='pt' ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
__lowercase = model.forward(**_UpperCAmelCase )
out.logits.norm().backward()
for module in model.modules():
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(_UpperCAmelCase , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Any = "gpt2-xl"
lowerCAmelCase__ : str = 3.3191854854152187
| 325 | 0 |
'''simple docstring'''
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
_SCREAMING_SNAKE_CASE = logging.getLogger()
_SCREAMING_SNAKE_CASE = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class lowerCAmelCase_ ( lowerCAmelCase__ ):
def _snake_case ( self , _lowerCAmelCase ) -> Union[str, Any]:
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
_lowerCAmelCase = {"source": "What is love ?", "target": "life"}
_lowerCAmelCase = {"train": 12, "val": 2, "test": 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
_lowerCAmelCase = "\n".join([contents[field]] * n_lines[split] )
with open(os.path.join(_UpperCAmelCase , f'''{split}.{field}''' ) , "w" ) as f:
f.write(_UpperCAmelCase )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = "pytorch" ) -> str:
_lowerCAmelCase = self.get_auto_remove_tmp_dir()
_lowerCAmelCase = os.path.join(_UpperCAmelCase , "output" )
_lowerCAmelCase = os.path.join(_UpperCAmelCase , "data" )
self._create_dummy_data(data_dir=_UpperCAmelCase )
_lowerCAmelCase = f'''
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
'''.split()
if gpus > 0:
testargs.append(f'''--gpus={gpus}''' )
if is_apex_available():
testargs.append("--fp16" )
else:
testargs.append("--gpus=0" )
testargs.append("--distributed_backend=ddp_cpu" )
testargs.append("--num_processes=2" )
_lowerCAmelCase = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(_UpperCAmelCase , env=self.get_env() )
_lowerCAmelCase = os.path.join(_UpperCAmelCase , "metrics.json" )
with open(_UpperCAmelCase ) as f:
_lowerCAmelCase = json.load(_UpperCAmelCase )
return result
@require_torch_gpu
def _snake_case ( self ) -> List[Any]:
_lowerCAmelCase = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
@require_torch_multi_gpu
def _snake_case ( self ) -> Optional[Any]:
_lowerCAmelCase = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
@require_torch_gpu
@require_ray
def _snake_case ( self ) -> Optional[int]:
_lowerCAmelCase = self._run_finetune(gpus=1 , distributed_retriever="ray" )
self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
@require_torch_multi_gpu
@require_ray
def _snake_case ( self ) -> Tuple:
_lowerCAmelCase = self._run_finetune(gpus=1 , distributed_retriever="ray" )
self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
| 158 |
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class A__ :
def __init__( self : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[Any]=13 , _UpperCAmelCase : List[str]=7 , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : str=True , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : Optional[Any]=99 , _UpperCAmelCase : Dict=32 , _UpperCAmelCase : List[str]=2 , _UpperCAmelCase : Union[str, Any]=4 , _UpperCAmelCase : Optional[int]=37 , _UpperCAmelCase : Union[str, Any]="gelu" , _UpperCAmelCase : str=0.1 , _UpperCAmelCase : Dict=0.1 , _UpperCAmelCase : str=5_12 , _UpperCAmelCase : Optional[int]=16 , _UpperCAmelCase : Optional[int]=2 , _UpperCAmelCase : List[str]=0.02 , _UpperCAmelCase : Optional[int]=3 , _UpperCAmelCase : Any=4 , _UpperCAmelCase : List[Any]=None , ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = parent
__lowercase = 13
__lowercase = 7
__lowercase = True
__lowercase = True
__lowercase = True
__lowercase = True
__lowercase = 99
__lowercase = 3_84
__lowercase = 2
__lowercase = 4
__lowercase = 37
__lowercase = 'gelu'
__lowercase = 0.1
__lowercase = 0.1
__lowercase = 5_12
__lowercase = 16
__lowercase = 2
__lowercase = 0.02
__lowercase = 3
__lowercase = 4
__lowercase = 1_28
__lowercase = 2
__lowercase = 9
__lowercase = 1
__lowercase = None
def a__ ( self : Dict ) -> List[Any]:
"""simple docstring"""
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
if self.use_token_type_ids:
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase = None
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase = ids_tensor([self.batch_size] , self.num_choices )
__lowercase = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_UpperCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self : Any , _UpperCAmelCase : Dict , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int ) -> List[Any]:
"""simple docstring"""
__lowercase = TFConvBertModel(config=_UpperCAmelCase )
__lowercase = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__lowercase = [input_ids, input_mask]
__lowercase = model(_UpperCAmelCase )
__lowercase = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self : int , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase = TFConvBertForMaskedLM(config=_UpperCAmelCase )
__lowercase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowercase = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self : str , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Any , _UpperCAmelCase : str ) -> Dict:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = TFConvBertForSequenceClassification(config=_UpperCAmelCase )
__lowercase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowercase = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.num_choices
__lowercase = TFConvBertForMultipleChoice(config=_UpperCAmelCase )
__lowercase = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__lowercase = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__lowercase = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__lowercase = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__lowercase = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def a__ ( self : Dict , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : str , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] ) -> int:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = TFConvBertForTokenClassification(config=_UpperCAmelCase )
__lowercase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowercase = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a__ ( self : Tuple , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Any , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] ) -> Any:
"""simple docstring"""
__lowercase = TFConvBertForQuestionAnswering(config=_UpperCAmelCase )
__lowercase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowercase = model(_UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a__ ( self : int ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
        (
            __lowercase,
            __lowercase,
            __lowercase,
            __lowercase,
            __lowercase,
            __lowercase,
            __lowercase,
        ) = config_and_inputs
__lowercase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class A__ ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ : List[str] = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowerCAmelCase__ : List[str] = (
{
"feature-extraction": TFConvBertModel,
"fill-mask": TFConvBertForMaskedLM,
"question-answering": TFConvBertForQuestionAnswering,
"text-classification": TFConvBertForSequenceClassification,
"token-classification": TFConvBertForTokenClassification,
"zero-shot": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase__ : List[str] = False
lowerCAmelCase__ : int = False
lowerCAmelCase__ : List[str] = False
def a__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
__lowercase = TFConvBertModelTester(self )
__lowercase = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 )
def a__ ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def a__ ( self : Any ) -> Dict:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def a__ ( self : int ) -> str:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase )
def a__ ( self : List[str] ) -> int:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase )
def a__ ( self : Any ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase )
def a__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase )
def a__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase )
@slow
def a__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = True
__lowercase = True
if hasattr(_UpperCAmelCase , 'use_cache' ):
__lowercase = True
__lowercase = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
__lowercase = getattr(self.model_tester , 'key_length' , _UpperCAmelCase )
for model_class in self.all_model_classes:
__lowercase = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase )
__lowercase = model_class(_UpperCAmelCase )
__lowercase = len(model(_UpperCAmelCase ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_UpperCAmelCase , saved_model=_UpperCAmelCase )
__lowercase = os.path.join(_UpperCAmelCase , 'saved_model' , '1' )
__lowercase = tf.keras.models.load_model(_UpperCAmelCase )
__lowercase = model(_UpperCAmelCase )
if self.is_encoder_decoder:
__lowercase = outputs['encoder_hidden_states']
__lowercase = outputs['encoder_attentions']
else:
__lowercase = outputs['hidden_states']
__lowercase = outputs['attentions']
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
__lowercase = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def a__ ( self : List[str] ) -> Dict:
"""simple docstring"""
__lowercase = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
self.assertIsNotNone(_UpperCAmelCase )
def a__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = True
__lowercase = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length )
__lowercase = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
__lowercase = getattr(self.model_tester , 'key_length' , _UpperCAmelCase )
__lowercase = getattr(self.model_tester , 'key_length' , _UpperCAmelCase )
def check_decoder_attentions_output(_UpperCAmelCase : int ):
__lowercase = len(_UpperCAmelCase )
self.assertEqual(out_len % 2 , 0 )
__lowercase = outputs.decoder_attentions
self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(_UpperCAmelCase : Union[str, Any] ):
__lowercase = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
__lowercase = True
__lowercase = False
__lowercase = model_class(_UpperCAmelCase )
__lowercase = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
__lowercase = len(_UpperCAmelCase )
self.assertEqual(config.output_hidden_states , _UpperCAmelCase )
check_encoder_attentions_output(_UpperCAmelCase )
if self.is_encoder_decoder:
__lowercase = model_class(_UpperCAmelCase )
__lowercase = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , _UpperCAmelCase )
check_decoder_attentions_output(_UpperCAmelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__lowercase = True
__lowercase = model_class(_UpperCAmelCase )
__lowercase = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , _UpperCAmelCase )
check_encoder_attentions_output(_UpperCAmelCase )
# Check attention is always last and order is fine
__lowercase = True
__lowercase = True
__lowercase = model_class(_UpperCAmelCase )
__lowercase = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_UpperCAmelCase ) )
self.assertEqual(model.config.output_hidden_states , _UpperCAmelCase )
check_encoder_attentions_output(_UpperCAmelCase )
@require_tf
class A__ ( unittest.TestCase ):
@slow
def a__ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
__lowercase = tf.constant([[0, 1, 2, 3, 4, 5]] )
__lowercase = model(_UpperCAmelCase )[0]
__lowercase = [1, 6, 7_68]
self.assertEqual(output.shape , _UpperCAmelCase )
__lowercase = tf.constant(
[
[
[-0.03_475_493, -0.4_686_034, -0.30_638_832],
[0.22_637_248, -0.26_988_646, -0.7_423_424],
[0.10_324_868, -0.45_013_508, -0.58_280_784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , _UpperCAmelCase , atol=1e-4 )
| 325 | 0 |
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
_a = {
'return_dict': False,
'output_hidden_states': True,
'output_attentions': True,
'torchscript': True,
'torch_dtype': 'float16',
'use_bfloat16': True,
'tf_legacy_loss': True,
'pruned_heads': {'a': 1},
'tie_word_embeddings': False,
'is_decoder': True,
'cross_attention_hidden_size': 1_28,
'add_cross_attention': True,
'tie_encoder_decoder': True,
'max_length': 50,
'min_length': 3,
'do_sample': True,
'early_stopping': True,
'num_beams': 3,
'num_beam_groups': 3,
'diversity_penalty': 0.5,
'temperature': 2.0,
'top_k': 10,
'top_p': 0.7,
'typical_p': 0.2,
'repetition_penalty': 0.8,
'length_penalty': 0.8,
'no_repeat_ngram_size': 5,
'encoder_no_repeat_ngram_size': 5,
'bad_words_ids': [1, 2, 3],
'num_return_sequences': 3,
'chunk_size_feed_forward': 5,
'output_scores': True,
'return_dict_in_generate': True,
'forced_bos_token_id': 2,
'forced_eos_token_id': 3,
'remove_invalid_values': True,
'architectures': ['BertModel'],
'finetuning_task': 'translation',
'id2label': {0: 'label'},
'label2id': {'label': '0'},
'tokenizer_class': 'BertTokenizerFast',
'prefix': 'prefix',
'bos_token_id': 6,
'pad_token_id': 7,
'eos_token_id': 8,
'sep_token_id': 9,
'decoder_start_token_id': 10,
'exponential_decay_length_penalty': (5, 1.01),
'suppress_tokens': [0, 1],
'begin_suppress_tokens': 2,
'task_specific_params': {'translation': 'some_params'},
'problem_type': 'regression',
}
@is_staging_test
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def _lowercase ( cls : str ):
__lowercase = TOKEN
HfFolder.save_token(_UpperCAmelCase )
@classmethod
def _lowercase ( cls : Optional[int] ):
try:
delete_repo(token=cls._token, repo_id="test-config" )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id="valid_org/test-config-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id="test-dynamic-config" )
except HTTPError:
pass
def _lowercase ( self : Union[str, Any] ):
__lowercase = BertConfig(
vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
config.push_to_hub("test-config", use_auth_token=self._token )
__lowercase = BertConfig.from_pretrained(F"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_UpperCAmelCase, getattr(_UpperCAmelCase, _UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token, repo_id="test-config" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_UpperCAmelCase, repo_id="test-config", push_to_hub=_UpperCAmelCase, use_auth_token=self._token )
__lowercase = BertConfig.from_pretrained(F"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_UpperCAmelCase, getattr(_UpperCAmelCase, _UpperCAmelCase ) )
def _lowercase ( self : Optional[Any] ):
__lowercase = BertConfig(
vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
config.push_to_hub("valid_org/test-config-org", use_auth_token=self._token )
__lowercase = BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_UpperCAmelCase, getattr(_UpperCAmelCase, _UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token, repo_id="valid_org/test-config-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
_UpperCAmelCase, repo_id="valid_org/test-config-org", push_to_hub=_UpperCAmelCase, use_auth_token=self._token )
__lowercase = BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_UpperCAmelCase, getattr(_UpperCAmelCase, _UpperCAmelCase ) )
def _lowercase ( self : Optional[Any] ):
CustomConfig.register_for_auto_class()
__lowercase = CustomConfig(attribute=4_2 )
config.push_to_hub("test-dynamic-config", use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig"} )
__lowercase = AutoConfig.from_pretrained(F"""{USER}/test-dynamic-config""", trust_remote_code=_UpperCAmelCase )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__, "CustomConfig" )
self.assertEqual(new_config.attribute, 4_2 )
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : Dict ):
__lowercase = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
__lowercase = c.n_embd + 1 # int
__lowercase = c.resid_pdrop + 1.0 # float
__lowercase = not c.scale_attn_weights # bool
__lowercase = c.summary_type + "foo" # str
c.update_from_string(
F"""n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}""" )
self.assertEqual(_UpperCAmelCase, c.n_embd, "mismatch for key: n_embd" )
self.assertEqual(_UpperCAmelCase, c.resid_pdrop, "mismatch for key: resid_pdrop" )
self.assertEqual(_UpperCAmelCase, c.scale_attn_weights, "mismatch for key: scale_attn_weights" )
self.assertEqual(_UpperCAmelCase, c.summary_type, "mismatch for key: summary_type" )
def _lowercase ( self : Dict ):
__lowercase = PretrainedConfig()
__lowercase = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
_UpperCAmelCase, ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"] )
__lowercase = [key for key, value in config_common_kwargs.items() if value == getattr(_UpperCAmelCase, _UpperCAmelCase )]
if len(_UpperCAmelCase ) > 0:
raise ValueError(
"The following keys are set with the default values in"
" `test_configuration_common.config_common_kwargs` pick another value for them:"
F""" {", ".join(_UpperCAmelCase )}.""" )
def _lowercase ( self : Any ):
with self.assertRaises(_UpperCAmelCase ):
# config is in subfolder, the following should not work without specifying the subfolder
__lowercase = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" )
__lowercase = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder", subfolder="bert" )
self.assertIsNotNone(_UpperCAmelCase )
def _lowercase ( self : Any ):
__lowercase = mock.Mock()
__lowercase = 5_0_0
__lowercase = {}
__lowercase = HTTPError
__lowercase = {}
# Download this model to make sure it's in the cache.
__lowercase = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request", return_value=_UpperCAmelCase ) as mock_head:
__lowercase = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
            # This checks that we did call the fake head request
mock_head.assert_called()
def _lowercase ( self : Dict ):
__lowercase = BertConfig.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json" )
def _lowercase ( self : Optional[Any] ):
__lowercase = AutoConfig.from_pretrained("bert-base-cased" )
__lowercase = ["config.4.0.0.json"]
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(_UpperCAmelCase )
__lowercase = 2
json.dump(configuration.to_dict(), open(os.path.join(_UpperCAmelCase, "config.4.0.0.json" ), "w" ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
__lowercase = AutoConfig.from_pretrained(_UpperCAmelCase )
self.assertEqual(new_configuration.hidden_size, 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
__lowercase = ["config.42.0.0.json"]
__lowercase = 7_6_8
configuration.save_pretrained(_UpperCAmelCase )
shutil.move(os.path.join(_UpperCAmelCase, "config.4.0.0.json" ), os.path.join(_UpperCAmelCase, "config.42.0.0.json" ) )
__lowercase = AutoConfig.from_pretrained(_UpperCAmelCase )
self.assertEqual(new_configuration.hidden_size, 7_6_8 )
def _lowercase ( self : Union[str, Any] ):
__lowercase = "hf-internal-testing/test-two-configs"
import transformers as new_transformers
__lowercase = "v4.0.0"
__lowercase ,__lowercase = new_transformers.models.auto.AutoConfig.from_pretrained(
_UpperCAmelCase, return_unused_kwargs=_UpperCAmelCase )
self.assertEqual(new_configuration.hidden_size, 2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(_UpperCAmelCase, {} )
        # Testing an older version by monkey-patching the version in the module where it's used.
import transformers as old_transformers
__lowercase = "v3.0.0"
__lowercase = old_transformers.models.auto.AutoConfig.from_pretrained(_UpperCAmelCase )
self.assertEqual(old_configuration.hidden_size, 7_6_8 )
| 17 |
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("""ignore""", category=UserWarning, module="""torch.optim.lr_scheduler""")
class A__ :
def __init__( self : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : bool = True , _UpperCAmelCase : bool = False ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = scheduler
__lowercase = optimizers if isinstance(_UpperCAmelCase , (list, tuple) ) else [optimizers]
__lowercase = split_batches
__lowercase = step_with_optimizer
__lowercase = GradientState()
def a__ ( self : Optional[int] , *_UpperCAmelCase : int , **_UpperCAmelCase : str ) -> Union[str, Any]:
"""simple docstring"""
if not self.step_with_optimizer:
# No link between scheduler and optimizer -> just step
self.scheduler.step(*_UpperCAmelCase , **_UpperCAmelCase )
return
# Otherwise, first make sure the optimizer was stepped.
if not self.gradient_state.sync_gradients:
if self.gradient_state.adjust_scheduler:
self.scheduler._step_count += 1
return
for opt in self.optimizers:
if opt.step_was_skipped:
return
if self.split_batches:
# Split batches -> the training dataloader batch size is not changed so one step per training step
self.scheduler.step(*_UpperCAmelCase , **_UpperCAmelCase )
else:
# Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
# num_processes steps per training step
__lowercase = AcceleratorState().num_processes
for _ in range(_UpperCAmelCase ):
# Special case when using OneCycle and `drop_last` was not used
if hasattr(self.scheduler , 'total_steps' ):
if self.scheduler._step_count <= self.scheduler.total_steps:
self.scheduler.step(*_UpperCAmelCase , **_UpperCAmelCase )
else:
self.scheduler.step(*_UpperCAmelCase , **_UpperCAmelCase )
def a__ ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return self.scheduler.get_last_lr()
def a__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
return self.scheduler.state_dict()
def a__ ( self : Optional[int] , _UpperCAmelCase : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
self.scheduler.load_state_dict(_UpperCAmelCase )
def a__ ( self : Dict ) -> int:
"""simple docstring"""
return self.scheduler.get_lr()
def a__ ( self : Union[str, Any] , *_UpperCAmelCase : Union[str, Any] , **_UpperCAmelCase : List[str] ) -> Any:
"""simple docstring"""
return self.scheduler.print_lr(*_UpperCAmelCase , **_UpperCAmelCase )
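# A minimal sketch of how this wrapper is obtained in practice: Accelerator.prepare
# hands schedulers back wrapped in the class above, keeping `step()` in sync with
# gradient accumulation and skipped optimizer steps. The model/optimizer/scheduler
# choices below are illustrative.
import torch
from accelerate import Accelerator

accelerator = Accelerator()
model = torch.nn.Linear(4, 4)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10)
model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)
scheduler.step()
print(scheduler.get_last_lr())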
| 325 | 0 |
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase = len(__SCREAMING_SNAKE_CASE )
lowercase = [[0] * n for i in range(__SCREAMING_SNAKE_CASE )]
for i in range(__SCREAMING_SNAKE_CASE ):
lowercase = y_points[i]
for i in range(2 , __SCREAMING_SNAKE_CASE ):
for j in range(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase = (
(xa - x_points[j - i + 1]) * q[j][i - 1]
- (xa - x_points[j]) * q[j - 1][i - 1]
) / (x_points[j] - x_points[j - i + 1])
return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
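# Worked call for the routine above -- Neville's scheme: q[j][i] holds the value at
# x0 of the interpolant through points j-i+1 .. j, so q[n-1][n-1] is the answer.
# `neville_interpolate` is an assumed de-obfuscated name; the (x_points, y_points,
# x0) argument order is read off the body.
x_points = [1, 2, 3, 4, 5]
y_points = [1, 4, 9, 16, 25]  # samples of y = x**2
value, table = neville_interpolate(x_points, y_points, 5.5)
print(value)  # 30.25 == 5.5 ** 2, exact because the data is polynomial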
| 195 |
import collections
import importlib.util
import os
import re
from pathlib import Path
SCREAMING_SNAKE_CASE__ = """src/transformers"""
# Matches is_xxx_available()
SCREAMING_SNAKE_CASE__ = re.compile(r"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
SCREAMING_SNAKE_CASE__ = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
SCREAMING_SNAKE_CASE__ = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
SCREAMING_SNAKE_CASE__ = re.compile("""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
SCREAMING_SNAKE_CASE__ = re.compile("""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
SCREAMING_SNAKE_CASE__ = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*try:""")
# Catches a line with else:
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*else:""")
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[Any] ) -> Dict:
if _re_test_backend.search(SCREAMING_SNAKE_CASE ) is None:
return None
__lowercase = [b[0] for b in _re_backend.findall(SCREAMING_SNAKE_CASE )]
backends.sort()
return "_and_".join(SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[Any] ) -> Tuple:
with open(SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' , newline='\n' ) as f:
__lowercase = f.readlines()
__lowercase = 0
while line_index < len(SCREAMING_SNAKE_CASE ) and not lines[line_index].startswith('_import_structure = {' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(SCREAMING_SNAKE_CASE ):
return None
# First grab the objects without a specific backend in _import_structure
__lowercase = []
while not lines[line_index].startswith('if TYPE_CHECKING' ) and find_backend(lines[line_index] ) is None:
__lowercase = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(SCREAMING_SNAKE_CASE ):
__lowercase = _re_one_line_import_struct.search(SCREAMING_SNAKE_CASE ).groups()[0]
            __lowercase = re.findall(r'\[([^\]]+)\]' , SCREAMING_SNAKE_CASE )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(', ' )] )
line_index += 1
continue
__lowercase = _re_import_struct_key_value.search(SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
__lowercase = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ' ) if len(SCREAMING_SNAKE_CASE ) > 0]
objects.extend(SCREAMING_SNAKE_CASE )
elif line.startswith(' ' * 8 + '"' ):
objects.append(line[9:-3] )
line_index += 1
__lowercase = {'none': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('if TYPE_CHECKING' ):
# If the line is an if not is_backend_available, we grab all objects associated.
__lowercase = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
__lowercase = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
__lowercase = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 4 ):
__lowercase = lines[line_index]
if _re_import_struct_add_one.search(SCREAMING_SNAKE_CASE ) is not None:
objects.append(_re_import_struct_add_one.search(SCREAMING_SNAKE_CASE ).groups()[0] )
elif _re_import_struct_add_many.search(SCREAMING_SNAKE_CASE ) is not None:
__lowercase = _re_import_struct_add_many.search(SCREAMING_SNAKE_CASE ).groups()[0].split(', ' )
__lowercase = [obj[1:-1] for obj in imports if len(SCREAMING_SNAKE_CASE ) > 0]
objects.extend(SCREAMING_SNAKE_CASE )
elif _re_between_brackets.search(SCREAMING_SNAKE_CASE ) is not None:
__lowercase = _re_between_brackets.search(SCREAMING_SNAKE_CASE ).groups()[0].split(', ' )
__lowercase = [obj[1:-1] for obj in imports if len(SCREAMING_SNAKE_CASE ) > 0]
objects.extend(SCREAMING_SNAKE_CASE )
elif _re_quote_object.search(SCREAMING_SNAKE_CASE ) is not None:
objects.append(_re_quote_object.search(SCREAMING_SNAKE_CASE ).groups()[0] )
elif line.startswith(' ' * 8 + '"' ):
objects.append(line[9:-3] )
elif line.startswith(' ' * 12 + '"' ):
objects.append(line[13:-3] )
line_index += 1
__lowercase = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
__lowercase = []
while (
line_index < len(SCREAMING_SNAKE_CASE )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('else' )
):
__lowercase = lines[line_index]
__lowercase = _re_import.search(SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 8 ):
objects.append(line[8:-2] )
line_index += 1
__lowercase = {'none': objects}
# Let's continue with backend-specific objects
while line_index < len(SCREAMING_SNAKE_CASE ):
# If the line is an if is_backend_available, we grab all objects associated.
__lowercase = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
__lowercase = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
__lowercase = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 8 ):
__lowercase = lines[line_index]
__lowercase = _re_import.search(SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 12 ):
objects.append(line[12:-2] )
line_index += 1
__lowercase = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")
        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
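# A tiny worked example of the check above (hypothetical object names; shown as a
# comment so the script's behavior is unchanged):
#   analyze_results({"none": ["AutoModel", "AutoModel"]},
#                   {"none": ["AutoModel", "AutoConfig"]})
# returns:
#   ["Duplicate _import_structure definitions for: ['AutoModel']",
#    "Differences for base imports:",
#    "  AutoConfig in TYPE_HINT but not in _import_structure."]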
def check_all_inits():
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
"""convert_pytorch_checkpoint_to_tf2""",
"""modeling_flax_pytorch_utils""",
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 325 | 0 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''albert-base-v1''': 512,
'''albert-large-v1''': 512,
'''albert-xlarge-v1''': 512,
'''albert-xxlarge-v1''': 512,
'''albert-base-v2''': 512,
'''albert-large-v2''': 512,
'''albert-xlarge-v2''': 512,
'''albert-xxlarge-v2''': 512,
}
SPIECE_UNDERLINE = "▁"
class AlbertTokenizer(PreTrainedTokenizer):
    """
    Construct an ALBERT tokenizer, based on SentencePiece.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')
        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces
    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
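    # A minimal sketch of the layout produced by the two methods above, with
    # made-up token ids (not real ALBERT vocabulary entries):
    #   cls_id, sep_id = 2, 3
    #   seq_a, seq_b = [10, 11, 12], [20, 21]
    #   pair           = [cls_id] + seq_a + [sep_id] + seq_b + [sep_id]
    #   token_type_ids = [0] * (len(seq_a) + 2) + [1] * (len(seq_b) + 1)
    # both lists are 8 positions long, and the type ids flip to 1 after the
    # first [SEP].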
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
| 282 |
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    """
    An adapter to assist with logging in multiprocess environments.
    """

    @staticmethod
    def _should_log(main_process_only):
        # check if the current process should emit this record
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` "
                "before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)

        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()
def get_logger(name: str, log_level: str = None):
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
| 325 | 0 |
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = DistilBertConfig(
            vocab_size=self.vocab_size, dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, hidden_dim=self.intermediate_size, hidden_act=self.hidden_act, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, tie_weights_=True,
        )
        return config, input_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxDistilBertModel,
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxDistilBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("distilbert-base-uncased")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 340 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim, depths=depths, focal_levels=focal_levels, focal_windows=focal_windows, use_conv_embed=use_conv_embed, id2label=id2label, label2id=label2id, use_post_layernorm=use_post_layernorm, use_layerscale=use_layerscale,
    )
    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name
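# The renaming above is applied in place over the checkpoint dict; in miniature
# the pattern looks like this (toy keys and values, not the real FocalNet weights):
#   state_dict = {"patch_embed.proj.weight": 1, "norm.bias": 2}
#   for key in list(state_dict):
#       state_dict[rename_key(key)] = state_dict.pop(key)
#   -> {"focalnet.embeddings.patch_embeddings.projection.weight": 1,
#       "focalnet.layernorm.bias": 2}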
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
'focalnet-tiny': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth',
'focalnet-tiny-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth',
'focalnet-small': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth',
'focalnet-small-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth',
'focalnet-base': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth',
'focalnet-base-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth',
'focalnet-large-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth',
'focalnet-large-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth',
'focalnet-xlarge-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth',
'focalnet-xlarge-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth',
}
# fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True, size={"shortest_edge": 256}, resample=PILImageResampling.BILINEAR, do_center_crop=True, crop_size=224, do_normalize=True, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])
    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""focalnet-tiny""",
type=str,
help="""Name of the FocalNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub.""",
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 325 | 0 |
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = """
import os
"""
IMPORT_IN_FUNCTION = """
def foo():
import os
return False
"""
DEEPLY_NESTED_IMPORT = """
def foo():
def bar():
if True:
import os
return False
return bar()
"""
TOP_LEVEL_TRY_IMPORT = """
import os
try:
import bar
except ImportError:
raise ValueError()
"""
TRY_IMPORT_IN_FUNCTION = """
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
"""
MULTIPLE_EXCEPTS_IMPORT = """
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
"""
EXCEPT_AS_IMPORT = """
import os
try:
import bar
except ImportError as e:
raise ValueError()
"""
GENERIC_EXCEPT_IMPORT = """
import os
try:
import bar
except:
raise ValueError()
"""
MULTILINE_TRY_IMPORT = """
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
"""
MULTILINE_BOTH_IMPORT = """
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
"""
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
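# Behavior sketch: get_imports does a static parse of the file. Top-level
# imports are collected, while imports guarded by try/except ImportError (in any
# of the shapes above) are treated as optional and skipped — which is why every
# case parses to just ["os"], never ["os", "bar"].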
| 327 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/mask2former-swin-small-coco-instance""": (
"""https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"""
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
logger = logging.get_logger(__name__)
class Mask2FormerConfig(PretrainedConfig):
    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}
    def __init__(
        self,
        backbone_config: Optional[Dict] = None,
        feature_size: int = 256,
        mask_feature_size: int = 256,
        hidden_dim: int = 256,
        encoder_feedforward_dim: int = 1024,
        activation_function: str = "relu",
        encoder_layers: int = 6,
        decoder_layers: int = 10,
        num_attention_heads: int = 8,
        dropout: float = 0.0,
        dim_feedforward: int = 2048,
        pre_norm: bool = False,
        enforce_input_projection: bool = False,
        common_stride: int = 4,
        ignore_value: int = 255,
        num_queries: int = 100,
        no_object_weight: float = 0.1,
        class_weight: float = 2.0,
        mask_weight: float = 5.0,
        dice_weight: float = 5.0,
        train_num_points: int = 12544,
        oversample_ratio: float = 3.0,
        importance_sample_ratio: float = 0.75,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        use_auxiliary_loss: bool = True,
        feature_strides: List[int] = [4, 8, 16, 32],
        output_auxiliary_logits: bool = None,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.")
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224, in_channels=3, patch_size=4, embed_dim=96, depths=[2, 2, 18, 2], num_heads=[3, 6, 12, 24], window_size=7, drop_path_rate=0.3, use_absolute_embeddings=False, out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers

        super().__init__(**kwargs)
    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        """Instantiate a `Mask2FormerConfig` from a pre-trained backbone model configuration."""
        return cls(
            backbone_config=backbone_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
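# A quick round-trip sketch (commented out; it is an assumption that this module
# is importable under the usual transformers path — the default Swin backbone
# config itself is built locally, so no download is needed):
#   config = Mask2FormerConfig()
#   d = config.to_dict()
#   assert d["backbone_config"]["model_type"] == "swin"
#   restored = Mask2FormerConfig(**d)   # the dict backbone goes through the
#                                       # isinstance(backbone_config, dict) branch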
| 325 | 0 |
import mpmath # for roots of unity
import numpy as np
class FFT:
    """
    Fast polynomial multiplication using the radix-2 FFT.
    """

    def __init__(self, poly_a=None, poly_b=None):
        # Input as coefficient lists, lowest degree first
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1))
        )

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)
        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()
    # Discrete fourier transform of A and B
    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        #
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root ** next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]
    # Multiply the two polynomials via pointwise product of their DFTs
    def __multiply(self):
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2
        # Unpack
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]
        # Remove leading 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c
    # Overwrite __str__ for print(); shows A, B and A*B
    def __str__(self):
        a = "A = " + " + ".join(
            f"{coef}*x^{i}" for coef, i in enumerate(self.polyA[: self.len_A])
        )
        b = "B = " + " + ".join(
            f"{coef}*x^{i}" for coef, i in enumerate(self.polyB[: self.len_B])
        )
        c = "A*B = " + " + ".join(
            f"{coef}*x^{i}" for coef, i in enumerate(self.product)
        )
        return f"{a}\n{b}\n{c}"
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
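    # Worked example (a quick sketch added alongside the doctests): multiplying
    # (1 + 2x + 3x^2) by (1 + x) gives 1 + 3x + 5x^2 + 3x^3; coefficients come
    # back lowest degree first as rounded complex numbers.
    print(FFT([1, 2, 3], [1, 1]).product)  # ≈ [(1+0j), (3+0j), (5+0j), (3+0j)]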
| 326 |
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
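# shift_tokens_right builds decoder inputs from the labels: shift right by one,
# put the decoder start token first, and replace any -100 loss-masking markers
# with the pad id. A standalone toy version of that behavior (illustrative names):
#   import numpy as np
#   def toy_shift_tokens_right(labels, pad_token_id, decoder_start_token_id):
#       shifted = np.roll(labels, 1, axis=-1)
#       shifted[:, 0] = decoder_start_token_id
#       return np.where(shifted == -100, pad_token_id, shifted)
#   toy_shift_tokens_right(np.array([[5, 6, 7]]), 0, 1)  # -> [[1, 5, 6]]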
| 326 | 1 |
import requests
giphy_api_key = "YOUR API KEY"


def get_gifs(query: str, api_key: str = giphy_api_key) -> list:
    """Get a list of URLs of GIFs based on a given query."""
    formatted_query = "+".join(query.split())
    url = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
    gifs = requests.get(url).json()["data"]
    return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print('''\n'''.join(get_gifs('''space ship''')))
| 326 |
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class PaintByExampleImageEncoder(CLIPPreTrainedModel):
    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size

        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)

        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector

        return latent_states
class PaintByExampleMapper(nn.Module):
    def __init__(self, config):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size, num_heads, hid_size // num_heads, activation_fn="gelu", attention_bias=True)
                for _ in range(num_layers)
            ]
        )

    def forward(self, hidden_states):
        for block in self.blocks:
            hidden_states = block(hidden_states)

        return hidden_states
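# A shape sketch of the encoder above (commented out; the config values are the
# CLIP vision defaults, which is an assumption for illustration):
#   from transformers import CLIPVisionConfig
#   enc = PaintByExampleImageEncoder(CLIPVisionConfig(), proj_size=768)
#   out = enc(torch.randn(1, 3, 224, 224))
#   print(out.shape)  # torch.Size([1, 1, 768])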
| 326 | 1 |
import math
def decimal_to_octal(num: int) -> str:
    """Convert a decimal number to its octal representation."""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f"0o{int(octal)}"


def main() -> None:
    """Print octal equivalents of decimal numbers."""
    print("\n2 in octal is:")
    print(decimal_to_octal(2))  # = 2
    print("\n8 in octal is:")
    print(decimal_to_octal(8))  # = 10
    print("\n65 in octal is:")
    print(decimal_to_octal(65))  # = 101
    print("\n216 in octal is:")
    print(decimal_to_octal(216))  # = 330
    print("\n512 in octal is:")
    print(decimal_to_octal(512))  # = 1000
    print("\n")
if __name__ == "__main__":
main()
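    # Cross-check sketch (editor's addition): results match Python's built-in
    # oct() formatting exactly.
    assert decimal_to_octal(216) == oct(216) == "0o330"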
| 326 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
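# The guard above in miniature — optional imports fall back to dummy objects so
# the package still imports when extras are missing (names below are illustrative,
# not real diffusers/transformers APIs):
#   try:
#       if not HAS_REQUIRED_EXTRAS:
#           raise OptionalDependencyNotAvailable()
#   except OptionalDependencyNotAvailable:
#       from ...utils.dummy_objects import SomePipeline   # placeholder that raises on use
#   else:
#       from .real_module import SomePipeline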
| 326 | 1 |
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class FeatureExtractionSavingTestMixin:
    feature_extraction_class = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
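# Concrete test files plug into the mixin by setting the two class attributes it
# expects (the subclass below is hypothetical, for illustration only):
#   class MyFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
#       feature_extraction_class = MyFeatureExtractor   # hypothetical extractor
#       feat_extract_dict = {"feature_size": 1}         # hypothetical init kwargs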
| 326 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = torch.device('''cpu''')
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1_703E00, 2.1_107E00, -2.0_811E00, 8.8_685E-01, 2.4_360E-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9_636E-01, 2.3_478E-01, -1.6_963E00, -1.7_381E00, -8.6_337E-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2_768E-01, -4.7_429E-01, -1.0_897E00, -1.0_248E00, 3.5_523E-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5_330E-01, 2.4_211E-01, -6.0_185E-01, -8.2_789E-01, -6.0_446E-02] )
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint

    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swiftformer_name''',
default='''swiftformer_xs''',
choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''],
type=str,
help='''Name of the SwiftFormer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''./converted_outputs/''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''')
_UpperCamelCase = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 326 | 1 |
def circle_sort(collection: list) -> list:
    """Sort a list in place with the circle sort algorithm and return it."""
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        swapped = False

        if low == high:
            return swapped

        left = low
        right = high

        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True

            left += 1
            right -= 1

        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True

        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)

        return swapped or left_swap or right_swap

    is_not_sorted = True

    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)

    return collection
if __name__ == "__main__":
_UpperCamelCase = input('''Enter numbers separated by a comma:\n''').strip()
_UpperCamelCase = [int(item) for item in user_input.split(''',''')]
print(circle_sort(unsorted))
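    # Quick self-check sketch (editor's addition): the sort is in-place and
    # handles short inputs.
    assert circle_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]
    assert circle_sort([]) == []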
| 326 |
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
_UpperCamelCase = logging.getLogger(__name__)
def git_log(folder_path: str):
    """
    Log commit info of the current repository into `folder_path`.
    """
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }
    with open(os.path.join(folder_path, "git_log.json"), "w") as f:
        json.dump(repo_infos, f, indent=4)
def init_gpu_params(params):
    """
    Handle single and multi-GPU / multi-node setups.
    """
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return

    assert torch.cuda.is_available()

    logger.info("Initializing GPUs")
    if params.n_gpu > 1:
        assert params.local_rank != -1

        params.world_size = int(os.environ["WORLD_SIZE"])
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
        params.global_rank = int(os.environ["RANK"])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ["N_NODES"])
        assert params.node_id == int(os.environ["NODE_RANK"])

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
    logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes)
    logger.info(PREFIX + "Node ID        : %i" % params.node_id)
    logger.info(PREFIX + "Local rank     : %i" % params.local_rank)
    logger.info(PREFIX + "World size     : %i" % params.world_size)
    logger.info(PREFIX + "GPUs per node  : %i" % params.n_gpu_per_node)
    logger.info(PREFIX + "Master         : %s" % str(params.is_master))
    logger.info(PREFIX + "Multi-node     : %s" % str(params.multi_node))
    logger.info(PREFIX + "Multi-GPU      : %s" % str(params.multi_gpu))
    logger.info(PREFIX + "Hostname       : %s" % socket.gethostname())

    # set GPU device
    torch.cuda.set_device(params.local_rank)

    # initialize multi-GPU
    if params.multi_gpu:
        logger.info("Initializing PyTorch distributed")
        torch.distributed.init_process_group(
            init_method="env://",
            backend="nccl",
        )
def set_seed(args):
    """
    Set the random seed.
    """
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
| 326 | 1 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState

    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create(cls, common: CommonSchedulerState, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray):
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)


@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState


class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype

    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[jnp.ndarray] = None,
        variance_type: str = "fixed_small",
        clip_sample: bool = True,
        prediction_type: str = "epsilon",
        dtype: jnp.dtype = jnp.float32,
    ):
        self.dtype = dtype
    def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState:
        if common is None:
            common = CommonSchedulerState.create(self)

        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)

        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]

        return DDPMSchedulerState.create(
            common=common,
            init_noise_sigma=init_noise_sigma,
            timesteps=timesteps,
        )

    def scale_model_input(
        self, state: DDPMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None
    ) -> jnp.ndarray:
        return sample

    def set_timesteps(
        self, state: DDPMSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> DDPMSchedulerState:
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]
        return state.replace(
            num_inference_steps=num_inference_steps,
            timesteps=timesteps,
        )
    def _get_variance(self, state: DDPMSchedulerState, t, predicted_variance=None, variance_type=None):
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1e-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1e-20))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = True , ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
'''simple docstring'''
__snake_case : int = timestep
if key is None:
__snake_case : List[str] = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
__snake_case , __snake_case : Any = jnp.split(UpperCAmelCase , sample.shape[1] , axis=1 )
else:
__snake_case : Tuple = None
# 1. compute alphas, betas
__snake_case : List[Any] = state.common.alphas_cumprod[t]
__snake_case : Optional[Any] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
__snake_case : Any = 1 - alpha_prod_t
__snake_case : Any = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
__snake_case : Optional[int] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
__snake_case : Any = model_output
elif self.config.prediction_type == "v_prediction":
__snake_case : Tuple = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` """
" for the FlaxDDPMScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
__snake_case : Dict = jnp.clip(UpperCAmelCase , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__snake_case : Optional[int] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
__snake_case : Optional[int] = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__snake_case : Dict = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
__snake_case : Optional[Any] = jax.random.split(UpperCAmelCase , num=1 )
__snake_case : int = jax.random.normal(UpperCAmelCase , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(UpperCAmelCase , UpperCAmelCase , predicted_variance=UpperCAmelCase ) ** 0.5) * noise
__snake_case : Dict = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
__snake_case : List[str] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=UpperCAmelCase , state=UpperCAmelCase )
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) -> jnp.ndarray:
'''simple docstring'''
return add_noise_common(state.common , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) -> jnp.ndarray:
'''simple docstring'''
return get_velocity_common(state.common , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def __len__( self ) -> Union[str, Any]:
'''simple docstring'''
return self.config.num_train_timesteps
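

# Usage sketch (added): drives the scheduler above through a short denoising
# loop. Class names in this dump are obfuscated, so the FlaxDDPMScheduler
# import below assumes the standard diffusers export; a zeros tensor stands in
# for a trained UNet purely to exercise create_state / set_timesteps / step.
if __name__ == "__main__":
    from diffusers import FlaxDDPMScheduler  # assumed export name

    scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
    state = scheduler.create_state()
    state = scheduler.set_timesteps(state, num_inference_steps=10, shape=(1, 3, 8, 8))
    rng = jax.random.PRNGKey(0)
    sample = jax.random.normal(rng, (1, 3, 8, 8))
    for t in state.timesteps:
        model_output = jnp.zeros_like(sample)  # stand-in for a real denoiser
        sample, state = scheduler.step(state, model_output, t, sample, key=rng, return_dict=False)
    print(sample.shape)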
| 326 |
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ : str =JukeboxTokenizer
UpperCAmelCase_ : Tuple ={
"artist": "Zac Brown Band",
"genres": "Country",
"lyrics": "I met a traveller from an antique land,\n Who said \"Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ",
}
@require_torch
def UpperCAmelCase ( self ) -> str:
'''simple docstring'''
import torch
__snake_case : List[str] = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics" )
__snake_case : Union[str, Any] = tokenizer(**self.metas )["input_ids"]
# fmt: off
__snake_case : Optional[Any] = [
torch.tensor([[
0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
def UpperCAmelCase ( self ) -> str:
'''simple docstring'''
import torch
__snake_case : Optional[Any] = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics" )
__snake_case : Tuple = tokenizer(**self.metas )["input_ids"]
# fmt: off
__snake_case : int = [
torch.tensor([[
0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 326 | 1 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
__snake_case : Optional[int] = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
__snake_case : List[Any] = get_activation("gelu" )
self.assertTrue(torch.allclose(gelu_python(UpperCAmelCase ) , torch_builtin(UpperCAmelCase ) ) )
self.assertFalse(torch.allclose(gelu_python(UpperCAmelCase ) , gelu_new(UpperCAmelCase ) ) )
def UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
__snake_case : List[Any] = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
__snake_case : Any = get_activation("gelu" )
__snake_case : Any = get_activation("gelu_10" )
__snake_case : Dict = torch_builtin(UpperCAmelCase )
__snake_case : int = geluaa(UpperCAmelCase )
__snake_case : List[Any] = torch.where(y_gelu_aa < 10.0 , 1 , 0 )
self.assertTrue(torch.max(UpperCAmelCase ).item() == 10.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
def UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
get_activation("gelu" )
get_activation("gelu_10" )
get_activation("gelu_fast" )
get_activation("gelu_new" )
get_activation("gelu_python" )
get_activation("gelu_pytorch_tanh" )
get_activation("linear" )
get_activation("mish" )
get_activation("quick_gelu" )
get_activation("relu" )
get_activation("sigmoid" )
get_activation("silu" )
get_activation("swish" )
get_activation("tanh" )
with self.assertRaises(UpperCAmelCase ):
get_activation("bogus" )
with self.assertRaises(UpperCAmelCase ):
get_activation(UpperCAmelCase )
def UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
__snake_case : Union[str, Any] = get_activation("gelu" )
__snake_case : List[str] = 1
__snake_case : Optional[Any] = get_activation("gelu" )
self.assertEqual(acta.a , 1 )
with self.assertRaises(UpperCAmelCase ):
__snake_case : Dict = acta.a
| 326 |
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
_UpperCamelCase = logging.get_logger(__name__)
class _lowerCamelCase :
"""simple docstring"""
UpperCAmelCase_ : str
UpperCAmelCase_ : str =None
@staticmethod
def UpperCAmelCase ( ) -> Optional[int]:
'''simple docstring'''
raise NotImplementedError
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> List[str]:
'''simple docstring'''
raise NotImplementedError
def UpperCAmelCase ( self , UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
raise NotImplementedError
def UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
if not self.is_available():
raise RuntimeError(
F"""You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.""" )
@classmethod
def UpperCAmelCase ( cls ) -> Tuple:
'''simple docstring'''
return F"""`pip install {cls.pip_package or cls.name}`"""
class _lowerCamelCase ( a ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] ="optuna"
@staticmethod
def UpperCAmelCase ( ) -> Union[str, Any]:
'''simple docstring'''
return is_optuna_available()
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> Dict:
'''simple docstring'''
return run_hp_search_optuna(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
def UpperCAmelCase ( self , UpperCAmelCase ) -> int:
'''simple docstring'''
return default_hp_space_optuna(UpperCAmelCase )
class _lowerCamelCase ( a ):
"""simple docstring"""
UpperCAmelCase_ : List[str] ="ray"
UpperCAmelCase_ : Dict ="'ray[tune]'"
@staticmethod
def UpperCAmelCase ( ) -> str:
'''simple docstring'''
return is_ray_available()
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
return run_hp_search_ray(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
def UpperCAmelCase ( self , UpperCAmelCase ) -> str:
'''simple docstring'''
return default_hp_space_ray(UpperCAmelCase )
class _lowerCamelCase ( a ):
"""simple docstring"""
UpperCAmelCase_ : Tuple ="sigopt"
@staticmethod
def UpperCAmelCase ( ) -> int:
'''simple docstring'''
return is_sigopt_available()
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
return run_hp_search_sigopt(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
def UpperCAmelCase ( self , UpperCAmelCase ) -> Dict:
'''simple docstring'''
return default_hp_space_sigopt(UpperCAmelCase )
class _lowerCamelCase ( a ):
"""simple docstring"""
UpperCAmelCase_ : str ="wandb"
@staticmethod
def UpperCAmelCase ( ) -> Optional[Any]:
'''simple docstring'''
return is_wandb_available()
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
return run_hp_search_wandb(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
def UpperCAmelCase ( self , UpperCAmelCase ) -> List[str]:
'''simple docstring'''
return default_hp_space_wandb(UpperCAmelCase )
_UpperCamelCase = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def lowerCAmelCase__( ) -> str:
__snake_case : Optional[int] = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
if len(lowercase ) > 0:
__snake_case : Dict = available_backends[0].name
if len(lowercase ) > 1:
logger.info(
f"""{len(lowercase )} hyperparameter search backends available. Using {name} as the default.""" )
return name
raise RuntimeError(
"No hyperparameter search backend available.\n"
+ "\n".join(
f""" - To install {backend.name} run {backend.pip_install()}"""
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
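

# Availability sketch (added): walks the registry defined above. Upstream
# transformers names the attribute and methods `name`, `is_available` and
# `pip_install`; those identifiers are obfuscated in this dump, so treat this
# as an assumption-laden illustration rather than a drop-in snippet.
if __name__ == "__main__":
    for backend_cls in (OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend):
        if backend_cls.is_available():
            print(f"{backend_cls.name}: available")
        else:
            print(f"{backend_cls.name}: missing, install with {backend_cls.pip_install()}")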
| 326 | 1 |
import string
import numpy
def lowerCAmelCase__( lowercase : int , lowercase : int ) -> int:
return b if a == 0 else greatest_common_divisor(b % a , lowercase )
class _lowerCamelCase :
"""simple docstring"""
UpperCAmelCase_ : Any =string.ascii_uppercase + string.digits
# This cipher takes alphanumerics into account
# i.e. a total of 36 characters
# take x and return x % len(key_string)
    UpperCAmelCase_ : List[str] =numpy.vectorize(lambda x : x % 36 )
    UpperCAmelCase_ : Any =numpy.vectorize(round )
def __init__( self , UpperCAmelCase ) -> None:
'''simple docstring'''
__snake_case : str = self.modulus(UpperCAmelCase ) # mod36 calc's on the encrypt key
self.check_determinant() # validate the determinant of the encryption key
__snake_case : List[Any] = encrypt_key.shape[0]
def UpperCAmelCase ( self , UpperCAmelCase ) -> int:
'''simple docstring'''
return self.key_string.index(UpperCAmelCase )
def UpperCAmelCase ( self , UpperCAmelCase ) -> str:
'''simple docstring'''
return self.key_string[round(UpperCAmelCase )]
def UpperCAmelCase ( self ) -> None:
'''simple docstring'''
__snake_case : List[str] = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
__snake_case : Union[str, Any] = det % len(self.key_string )
__snake_case : int = len(self.key_string )
if greatest_common_divisor(UpperCAmelCase , len(self.key_string ) ) != 1:
__snake_case : Union[str, Any] = (
F"""determinant modular {req_l} of encryption key({det}) """
F"""is not co prime w.r.t {req_l}.\nTry another key."""
)
raise ValueError(UpperCAmelCase )
def UpperCAmelCase ( self , UpperCAmelCase ) -> str:
'''simple docstring'''
__snake_case : str = [char for char in text.upper() if char in self.key_string]
__snake_case : Optional[int] = chars[-1]
while len(UpperCAmelCase ) % self.break_key != 0:
chars.append(UpperCAmelCase )
return "".join(UpperCAmelCase )
def UpperCAmelCase ( self , UpperCAmelCase ) -> str:
'''simple docstring'''
__snake_case : Dict = self.process_text(text.upper() )
__snake_case : Tuple = ""
for i in range(0 , len(UpperCAmelCase ) - self.break_key + 1 , self.break_key ):
__snake_case : Optional[Any] = text[i : i + self.break_key]
__snake_case : int = [self.replace_letters(UpperCAmelCase ) for char in batch]
__snake_case : Tuple = numpy.array([vec] ).T
__snake_case : Optional[Any] = self.modulus(self.encrypt_key.dot(UpperCAmelCase ) ).T.tolist()[
0
]
__snake_case : List[str] = "".join(
self.replace_digits(UpperCAmelCase ) for num in batch_encrypted )
encrypted += encrypted_batch
return encrypted
def UpperCAmelCase ( self ) -> numpy.ndarray:
'''simple docstring'''
__snake_case : Union[str, Any] = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
__snake_case : Optional[int] = det % len(self.key_string )
__snake_case : Any = None
for i in range(len(self.key_string ) ):
if (det * i) % len(self.key_string ) == 1:
__snake_case : List[str] = i
break
__snake_case : Union[str, Any] = (
det_inv
* numpy.linalg.det(self.encrypt_key )
* numpy.linalg.inv(self.encrypt_key )
)
return self.to_int(self.modulus(UpperCAmelCase ) )
def UpperCAmelCase ( self , UpperCAmelCase ) -> str:
'''simple docstring'''
__snake_case : int = self.make_decrypt_key()
__snake_case : Dict = self.process_text(text.upper() )
__snake_case : Dict = ""
for i in range(0 , len(UpperCAmelCase ) - self.break_key + 1 , self.break_key ):
__snake_case : Optional[int] = text[i : i + self.break_key]
__snake_case : Dict = [self.replace_letters(UpperCAmelCase ) for char in batch]
__snake_case : Optional[int] = numpy.array([vec] ).T
__snake_case : Any = self.modulus(decrypt_key.dot(UpperCAmelCase ) ).T.tolist()[0]
__snake_case : List[Any] = "".join(
self.replace_digits(UpperCAmelCase ) for num in batch_decrypted )
decrypted += decrypted_batch
return decrypted
def lowerCAmelCase__( ) -> None:
__snake_case : List[Any] = int(input("Enter the order of the encryption key: " ) )
__snake_case : List[str] = []
print("Enter each row of the encryption key with space separated integers" )
for _ in range(lowercase ):
__snake_case : Tuple = [int(lowercase ) for x in input().split()]
hill_matrix.append(lowercase )
__snake_case : Optional[int] = HillCipher(numpy.array(lowercase ) )
print("Would you like to encrypt or decrypt some text? (1 or 2)" )
__snake_case : List[Any] = input("\n1. Encrypt\n2. Decrypt\n" )
if option == "1":
__snake_case : Optional[Any] = input("What text would you like to encrypt?: " )
print("Your encrypted text is:" )
print(hc.encrypt(lowercase ) )
elif option == "2":
__snake_case : List[Any] = input("What text would you like to decrypt?: " )
print("Your decrypted text is:" )
print(hc.decrypt(lowercase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
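
    # Non-interactive round-trip example (added). HillCipher, encrypt and
    # decrypt are the names used in main() above; the key's determinant must be
    # coprime with 36, e.g. det([[2, 5], [1, 6]]) = 7 and gcd(7, 36) = 1.
    # hc_demo = HillCipher(numpy.array([[2, 5], [1, 6]]))
    # ciphertext = hc_demo.encrypt("HELLO")          # padded to "HELLOO"
    # assert hc_demo.decrypt(ciphertext).startswith("HELLO")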
| 326 |
import math
def lowerCAmelCase__( lowercase : list , lowercase : int = 0 , lowercase : int = 0 ) -> list:
__snake_case : Any = end or len(lowercase )
for i in range(lowercase , lowercase ):
__snake_case : List[str] = i
__snake_case : Union[str, Any] = array[i]
while temp_index != start and temp_index_value < array[temp_index - 1]:
__snake_case : Optional[Any] = array[temp_index - 1]
temp_index -= 1
__snake_case : Any = temp_index_value
return array
def lowerCAmelCase__( lowercase : list , lowercase : int , lowercase : int ) -> None: # Max Heap
__snake_case : Any = index
__snake_case : Optional[Any] = 2 * index + 1 # Left Node
__snake_case : str = 2 * index + 2 # Right Node
if left_index < heap_size and array[largest] < array[left_index]:
__snake_case : Optional[int] = left_index
if right_index < heap_size and array[largest] < array[right_index]:
__snake_case : Tuple = right_index
if largest != index:
__snake_case , __snake_case : int = array[largest], array[index]
heapify(lowercase , lowercase , lowercase )
def lowerCAmelCase__( lowercase : list ) -> list:
__snake_case : List[str] = len(lowercase )
for i in range(n // 2 , -1 , -1 ):
heapify(lowercase , lowercase , lowercase )
for i in range(n - 1 , 0 , -1 ):
__snake_case , __snake_case : Optional[Any] = array[0], array[i]
heapify(lowercase , 0 , lowercase )
return array
def lowerCAmelCase__( lowercase : list , lowercase : int , lowercase : int , lowercase : int ) -> int:
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
def lowerCAmelCase__( lowercase : list , lowercase : int , lowercase : int , lowercase : int ) -> int:
__snake_case : Union[str, Any] = low
__snake_case : Union[str, Any] = high
while True:
while array[i] < pivot:
i += 1
j -= 1
while pivot < array[j]:
j -= 1
if i >= j:
return i
__snake_case , __snake_case : str = array[j], array[i]
i += 1
def lowerCAmelCase__( lowercase : list ) -> list:
if len(lowercase ) == 0:
return array
    __snake_case : Union[str, Any] = 2 * math.ceil(math.log2(len(lowercase ) ) )
__snake_case : Dict = 16
return intro_sort(lowercase , 0 , len(lowercase ) , lowercase , lowercase )
def lowerCAmelCase__( lowercase : list , lowercase : int , lowercase : int , lowercase : int , lowercase : int ) -> list:
while end - start > size_threshold:
if max_depth == 0:
return heap_sort(lowercase )
max_depth -= 1
__snake_case : List[str] = median_of_a(lowercase , lowercase , start + ((end - start) // 2) + 1 , end - 1 )
__snake_case : Optional[Any] = partition(lowercase , lowercase , lowercase , lowercase )
intro_sort(lowercase , lowercase , lowercase , lowercase , lowercase )
__snake_case : List[str] = p
return insertion_sort(lowercase , lowercase , lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
_UpperCamelCase = input('''Enter numbers separated by a comma : ''').strip()
_UpperCamelCase = [float(item) for item in user_input.split(''',''')]
print(sort(unsorted))
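
    # Sanity check (added): against the upstream implementation, sort() agrees
    # with Python's built-in sorted(). Several assignment targets above are
    # obfuscated in this dump, so run this against the original source.
    # sample = [4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12]
    # assert sort(sample) == sorted(sample)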
| 326 | 1 |
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def lowerCAmelCase__( lowercase : Dict ) -> str: # picklable for multiprocessing
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def lowerCAmelCase__( ) -> List[Any]:
with parallel_backend("spark" ):
assert ParallelBackendConfig.backend_name == "spark"
__snake_case : Any = [1, 2, 3]
with pytest.raises(lowercase ):
with parallel_backend("unsupported backend" ):
map_nested(lowercase , lowercase , num_proc=2 )
with pytest.raises(lowercase ):
with parallel_backend("unsupported backend" ):
map_nested(lowercase , lowercase , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc" , [2, -1] )
def lowerCAmelCase__( lowercase : Dict ) -> Dict:
__snake_case : Any = [1, 2]
__snake_case : Dict = {"a": 1, "b": 2}
__snake_case : Optional[int] = {"a": [1, 2], "b": [3, 4]}
__snake_case : int = {"a": {"1": 1}, "b": 2}
__snake_case : str = {"a": 1, "b": 2, "c": 3, "d": 4}
__snake_case : Dict = [2, 3]
__snake_case : Tuple = {"a": 2, "b": 3}
__snake_case : int = {"a": [2, 3], "b": [4, 5]}
__snake_case : Dict = {"a": {"1": 2}, "b": 3}
__snake_case : str = {"a": 2, "b": 3, "c": 4, "d": 5}
with parallel_backend("spark" ):
assert map_nested(lowercase , lowercase , num_proc=lowercase ) == expected_map_nested_sa
assert map_nested(lowercase , lowercase , num_proc=lowercase ) == expected_map_nested_sa
assert map_nested(lowercase , lowercase , num_proc=lowercase ) == expected_map_nested_sa
assert map_nested(lowercase , lowercase , num_proc=lowercase ) == expected_map_nested_sa
assert map_nested(lowercase , lowercase , num_proc=lowercase ) == expected_map_nested_sa
| 326 |
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def lowerCAmelCase__( lowercase : Dict ) -> str: # picklable for multiprocessing
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def lowerCAmelCase__( ) -> List[Any]:
with parallel_backend("spark" ):
assert ParallelBackendConfig.backend_name == "spark"
__snake_case : Any = [1, 2, 3]
with pytest.raises(lowercase ):
with parallel_backend("unsupported backend" ):
map_nested(lowercase , lowercase , num_proc=2 )
with pytest.raises(lowercase ):
with parallel_backend("unsupported backend" ):
map_nested(lowercase , lowercase , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc" , [2, -1] )
def lowerCAmelCase__( lowercase : Dict ) -> Dict:
__snake_case : Any = [1, 2]
__snake_case : Dict = {"a": 1, "b": 2}
__snake_case : Optional[int] = {"a": [1, 2], "b": [3, 4]}
__snake_case : int = {"a": {"1": 1}, "b": 2}
__snake_case : str = {"a": 1, "b": 2, "c": 3, "d": 4}
__snake_case : Dict = [2, 3]
__snake_case : Tuple = {"a": 2, "b": 3}
__snake_case : int = {"a": [2, 3], "b": [4, 5]}
__snake_case : Dict = {"a": {"1": 2}, "b": 3}
__snake_case : str = {"a": 2, "b": 3, "c": 4, "d": 5}
with parallel_backend("spark" ):
assert map_nested(lowercase , lowercase , num_proc=lowercase ) == expected_map_nested_sa
assert map_nested(lowercase , lowercase , num_proc=lowercase ) == expected_map_nested_sa
assert map_nested(lowercase , lowercase , num_proc=lowercase ) == expected_map_nested_sa
assert map_nested(lowercase , lowercase , num_proc=lowercase ) == expected_map_nested_sa
assert map_nested(lowercase , lowercase , num_proc=lowercase ) == expected_map_nested_sa
| 326 | 1 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
_UpperCamelCase = ['''gpt2''']
_UpperCamelCase = '''gpt2'''
if is_tf_available():
class _lowerCamelCase ( tf.Module ):
"""simple docstring"""
def __init__( self , UpperCAmelCase ) -> int:
'''simple docstring'''
super().__init__()
__snake_case : int = tokenizer
__snake_case : Optional[int] = AutoConfig.from_pretrained(UpperCAmelCase )
__snake_case : List[str] = TFGPTaLMHeadModel.from_config(UpperCAmelCase )
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name="text" ),) )
def UpperCAmelCase ( self , UpperCAmelCase ) -> Any:
'''simple docstring'''
__snake_case : Union[str, Any] = self.tokenizer(UpperCAmelCase )
__snake_case : Tuple = tokenized["input_ids"].to_tensor()
__snake_case : int = tf.cast(input_ids_dense > 0 , tf.intaa )
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
__snake_case : Optional[int] = self.model(input_ids=UpperCAmelCase , attention_mask=UpperCAmelCase )["logits"]
return outputs
@require_tf
@require_keras_nlp
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
super().setUp()
__snake_case : Union[str, Any] = [GPTaTokenizer.from_pretrained(UpperCAmelCase ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
__snake_case : Optional[Any] = [TFGPTaTokenizer.from_pretrained(UpperCAmelCase ) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
__snake_case : Tuple = [
"This is a straightforward English test sentence.",
"This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
"Now we're going to add some Chinese: 一 二 三 一二三",
"And some much more rare Chinese: 齉 堃 齉堃",
"Je vais aussi écrire en français pour tester les accents",
"Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
]
__snake_case : Optional[int] = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in self.test_sentences:
__snake_case : Any = tokenizer([test_inputs] , return_tensors="tf" )
__snake_case : Optional[Any] = tf_tokenizer([test_inputs] )
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
__snake_case : Any = python_outputs[key].numpy()
__snake_case : Tuple = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
self.assertTrue(tf.reduce_all(tf.cast(UpperCAmelCase , tf.intaa ) == tf_outputs_values ) )
@slow
def UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
__snake_case : Tuple = tf.function(UpperCAmelCase )
for test_inputs in self.test_sentences:
__snake_case : str = tf.constant(UpperCAmelCase )
__snake_case : Dict = compiled_tokenizer(UpperCAmelCase )
__snake_case : Optional[int] = tf_tokenizer(UpperCAmelCase )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def UpperCAmelCase ( self ) -> str:
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
__snake_case : Union[str, Any] = ModelToSave(tokenizer=UpperCAmelCase )
__snake_case : int = tf.convert_to_tensor([self.test_sentences[0]] )
__snake_case : Union[str, Any] = model.serving(UpperCAmelCase ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
__snake_case : int = Path(UpperCAmelCase ) / "saved.model"
tf.saved_model.save(UpperCAmelCase , UpperCAmelCase , signatures={"serving_default": model.serving} )
__snake_case : Any = tf.saved_model.load(UpperCAmelCase )
__snake_case : Dict = loaded_model.signatures["serving_default"](UpperCAmelCase )["output_0"]
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
__snake_case : Dict = tf.convert_to_tensor([self.test_sentences[0]] )
__snake_case : Any = tf_tokenizer(UpperCAmelCase ) # Build model with some sample inputs
__snake_case : Any = tf_tokenizer.get_config()
__snake_case : List[Any] = TFGPTaTokenizer.from_config(UpperCAmelCase )
__snake_case : List[str] = model_from_config(UpperCAmelCase )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
__snake_case : str = 123123
for max_length in [3, 5, 1024]:
__snake_case : Union[str, Any] = tf.convert_to_tensor([self.test_sentences[0]] )
__snake_case : Any = tf_tokenizer(UpperCAmelCase , max_length=UpperCAmelCase )
__snake_case : str = out["input_ids"].numpy().shape[1]
assert out_length == max_length
| 326 |
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def lowerCAmelCase__( lowercase : Dict , lowercase : bool = True , lowercase : float = math.inf , lowercase : float = -math.inf , lowercase : float = math.inf , lowercase : float = -math.inf , lowercase : bool = False , lowercase : float = 100 , lowercase : float = 0.0_1 , lowercase : float = 1 , ) -> Any:
__snake_case : Optional[Any] = False
__snake_case : Optional[Any] = search_prob
__snake_case : str = start_temperate
__snake_case : List[Any] = []
__snake_case : str = 0
__snake_case : Dict = None
while not search_end:
__snake_case : List[Any] = current_state.score()
if best_state is None or current_score > best_state.score():
__snake_case : List[Any] = current_state
scores.append(lowercase )
iterations += 1
__snake_case : Dict = None
__snake_case : str = current_state.get_neighbors()
while (
next_state is None and neighbors
): # till we do not find a neighbor that we can move to
__snake_case : Any = random.randint(0 , len(lowercase ) - 1 ) # picking a random neighbor
__snake_case : int = neighbors.pop(lowercase )
__snake_case : Optional[Any] = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
__snake_case : Any = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
__snake_case : List[str] = picked_neighbor
else:
__snake_case : Optional[Any] = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
__snake_case : str = picked_neighbor
__snake_case : Optional[Any] = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
__snake_case : Optional[Any] = True
else:
__snake_case : str = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(lowercase ) , lowercase )
plt.xlabel("Iterations" )
plt.ylabel("Function values" )
plt.show()
return best_state
if __name__ == "__main__":
def lowerCAmelCase__( lowercase : List[str] , lowercase : Tuple ) -> str:
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
_UpperCamelCase = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
_UpperCamelCase = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'''The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
        F'''and 50 > y > - 5 found via simulated annealing: {local_min.score()}'''
)
# starting the problem with initial coordinates (12, 47)
_UpperCamelCase = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
_UpperCamelCase = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'''The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
        F'''and 50 > y > - 5 found via simulated annealing: {local_min.score()}'''
)
def lowerCAmelCase__( lowercase : Any , lowercase : Union[str, Any] ) -> Any:
return (3 * x**2) - (6 * y)
_UpperCamelCase = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
_UpperCamelCase = simulated_annealing(prob, find_max=False, visualization=True)
print(
        '''The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: '''
F'''{local_min.score()}'''
)
_UpperCamelCase = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
_UpperCamelCase = simulated_annealing(prob, find_max=True, visualization=True)
print(
        '''The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: '''
F'''{local_min.score()}'''
)
| 326 | 1 |
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def lowerCAmelCase__( lowercase : List[str] , lowercase : Optional[Any] , lowercase : List[str] ) -> Optional[int]:
__snake_case : Optional[Any] = OmegaConf.load(lowercase )
__snake_case : int = torch.load(lowercase , map_location="cpu" )["model"]
__snake_case : str = list(state_dict.keys() )
# extract state_dict for VQVAE
__snake_case : List[Any] = {}
__snake_case : int = "first_stage_model."
for key in keys:
if key.startswith(lowercase ):
__snake_case : Optional[int] = state_dict[key]
# extract state_dict for UNetLDM
__snake_case : List[str] = {}
__snake_case : List[Any] = "model.diffusion_model."
for key in keys:
if key.startswith(lowercase ):
__snake_case : List[Any] = state_dict[key]
__snake_case : Union[str, Any] = config.model.params.first_stage_config.params
__snake_case : Union[str, Any] = config.model.params.unet_config.params
__snake_case : str = VQModel(**lowercase ).eval()
vqvae.load_state_dict(lowercase )
__snake_case : Optional[Any] = UNetLDMModel(**lowercase ).eval()
unet.load_state_dict(lowercase )
__snake_case : Dict = DDIMScheduler(
timesteps=config.model.params.timesteps , beta_schedule="scaled_linear" , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=lowercase , )
__snake_case : List[Any] = LDMPipeline(lowercase , lowercase , lowercase )
pipeline.save_pretrained(lowercase )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', type=str, required=True)
parser.add_argument('''--config_path''', type=str, required=True)
parser.add_argument('''--output_path''', type=str, required=True)
_UpperCamelCase = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
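
    # Example invocation (added); all paths below are placeholders:
    #   python convert_ldm_original.py \
    #       --checkpoint_path ./ldm/model.ckpt \
    #       --config_path ./ldm/config.yaml \
    #       --output_path ./ldm_pipeline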
| 326 |
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _lowerCamelCase ( a ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] =["image_processor", "tokenizer"]
UpperCAmelCase_ : Tuple ="FlavaImageProcessor"
UpperCAmelCase_ : List[Any] =("BertTokenizer", "BertTokenizerFast")
def __init__( self , UpperCAmelCase=None , UpperCAmelCase=None , **UpperCAmelCase ) -> int:
'''simple docstring'''
__snake_case : List[Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , UpperCAmelCase , )
__snake_case : List[Any] = kwargs.pop("feature_extractor" )
__snake_case : Any = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(UpperCAmelCase , UpperCAmelCase )
__snake_case : Tuple = self.image_processor
def __call__( self , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = True , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = None , UpperCAmelCase = 0 , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = True , UpperCAmelCase = None , **UpperCAmelCase , ) -> List[Any]:
'''simple docstring'''
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
__snake_case : Union[str, Any] = self.tokenizer(
text=UpperCAmelCase , add_special_tokens=UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=UpperCAmelCase , stride=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_overflowing_tokens=UpperCAmelCase , return_special_tokens_mask=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , return_length=UpperCAmelCase , verbose=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase , )
if images is not None:
__snake_case : Union[str, Any] = self.image_processor(
UpperCAmelCase , return_image_mask=UpperCAmelCase , return_codebook_pixels=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase , )
if text is not None and images is not None:
encoding.update(UpperCAmelCase )
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCAmelCase ) , tensor_type=UpperCAmelCase )
def UpperCAmelCase ( self , *UpperCAmelCase , **UpperCAmelCase ) -> str:
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCAmelCase , **UpperCAmelCase )
def UpperCAmelCase ( self , *UpperCAmelCase , **UpperCAmelCase ) -> Tuple:
'''simple docstring'''
return self.tokenizer.decode(*UpperCAmelCase , **UpperCAmelCase )
@property
def UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case : List[Any] = self.tokenizer.model_input_names
__snake_case : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , UpperCAmelCase , )
return self.image_processor_class
@property
def UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , UpperCAmelCase , )
return self.image_processor
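

# Usage sketch (added), assuming this processor is the one transformers exports
# as FlavaProcessor (class names in this dump are obfuscated) and that the
# public facebook/flava-full checkpoint is reachable.
if __name__ == "__main__":
    from PIL import Image
    from transformers import FlavaProcessor  # assumed export name

    processor = FlavaProcessor.from_pretrained("facebook/flava-full")
    dummy_image = Image.new("RGB", (224, 224))
    batch = processor(text=["a photo of a cat"], images=dummy_image, return_tensors="pt")
    print(sorted(batch.keys()))  # e.g. attention_mask, input_ids, pixel_values, ...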
| 326 | 1 |
def lowerCAmelCase__( lowercase : int = 1000 ) -> int:
__snake_case , __snake_case : Optional[int] = 1, 1
__snake_case : List[str] = 2
while True:
__snake_case : Optional[Any] = 0
__snake_case : Any = fa + fa
__snake_case , __snake_case : Optional[int] = fa, f
index += 1
for _ in str(lowercase ):
i += 1
if i == n:
break
return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 326 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {'''vocab_file''': '''sentencepiece.bpe.model'''}
_UpperCamelCase = {
'''vocab_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''',
}
}
_UpperCamelCase = {
'''camembert-base''': 512,
}
_UpperCamelCase = '''▁'''
class _lowerCamelCase ( a ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] =VOCAB_FILES_NAMES
UpperCAmelCase_ : str =PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ : int =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase_ : str =["input_ids", "attention_mask"]
def __init__( self , UpperCAmelCase , UpperCAmelCase="<s>" , UpperCAmelCase="</s>" , UpperCAmelCase="</s>" , UpperCAmelCase="<s>" , UpperCAmelCase="<unk>" , UpperCAmelCase="<pad>" , UpperCAmelCase="<mask>" , UpperCAmelCase=["<s>NOTUSED", "</s>NOTUSED"] , UpperCAmelCase = None , **UpperCAmelCase , ) -> None:
'''simple docstring'''
__snake_case : Dict = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else mask_token
__snake_case : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , cls_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , additional_special_tokens=UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase , )
__snake_case : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCAmelCase ) )
__snake_case : Dict = vocab_file
# HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>)
__snake_case : str = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
__snake_case : Optional[int] = len(self.fairseq_tokens_to_ids )
__snake_case : Any = len(self.sp_model ) + len(self.fairseq_tokens_to_ids )
__snake_case : List[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__snake_case : Dict = [self.cls_token_id]
__snake_case : Any = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase , token_ids_a=UpperCAmelCase , already_has_special_tokens=UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase )) + [1]
return [1] + ([0] * len(UpperCAmelCase )) + [1, 1] + ([0] * len(UpperCAmelCase )) + [1]
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase = None ) -> List[int]:
'''simple docstring'''
__snake_case : int = [self.sep_token_id]
__snake_case : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def UpperCAmelCase ( self ) -> int:
'''simple docstring'''
return len(self.fairseq_tokens_to_ids ) + len(self.sp_model )
def UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
__snake_case : Optional[int] = {self.convert_ids_to_tokens(UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCAmelCase ( self , UpperCAmelCase ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(UpperCAmelCase , out_type=UpperCAmelCase )
def UpperCAmelCase ( self , UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
elif self.sp_model.PieceToId(UpperCAmelCase ) == 0:
# Convert sentence piece unk token to fairseq unk token index
return self.unk_token_id
return self.fairseq_offset + self.sp_model.PieceToId(UpperCAmelCase )
def UpperCAmelCase ( self , UpperCAmelCase ) -> Tuple:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def UpperCAmelCase ( self , UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
__snake_case : Tuple = []
__snake_case : Union[str, Any] = ""
__snake_case : Optional[int] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(UpperCAmelCase ) + token
__snake_case : List[Any] = True
__snake_case : Union[str, Any] = []
else:
current_sub_tokens.append(UpperCAmelCase )
__snake_case : int = False
out_string += self.sp_model.decode(UpperCAmelCase )
return out_string.strip()
def __getstate__( self ) -> List[Any]:
'''simple docstring'''
__snake_case : str = self.__dict__.copy()
__snake_case : Optional[Any] = None
return state
def __setstate__( self , UpperCAmelCase ) -> str:
'''simple docstring'''
__snake_case : Optional[Any] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
__snake_case : List[str] = {}
__snake_case : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(UpperCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
__snake_case : Optional[Any] = os.path.join(
UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCAmelCase , "wb" ) as fi:
__snake_case : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase )
return (out_vocab_file,)
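

# Usage sketch (added), assuming this is the tokenizer transformers exports as
# CamembertTokenizer (class names in this dump are obfuscated).
if __name__ == "__main__":
    from transformers import CamembertTokenizer  # assumed export name

    tok = CamembertTokenizer.from_pretrained("camembert-base")
    encoding = tok("J'aime le camembert !")
    print(tok.convert_ids_to_tokens(encoding["input_ids"]))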
| 326 | 1 |
from __future__ import annotations
from collections import deque
class _lowerCamelCase :
"""simple docstring"""
def __init__( self , UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
__snake_case : list[dict] = []
self.adlist.append(
{"value": "", "next_states": [], "fail_state": 0, "output": []} )
for keyword in keywords:
self.add_keyword(UpperCAmelCase )
self.set_fail_transitions()
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase ) -> int | None:
'''simple docstring'''
for state in self.adlist[current_state]["next_states"]:
if char == self.adlist[state]["value"]:
return state
return None
def UpperCAmelCase ( self , UpperCAmelCase ) -> None:
'''simple docstring'''
__snake_case : List[str] = 0
for character in keyword:
__snake_case : str = self.find_next_state(UpperCAmelCase , UpperCAmelCase )
if next_state is None:
self.adlist.append(
{
"value": character,
"next_states": [],
"fail_state": 0,
"output": [],
} )
self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
__snake_case : Any = len(self.adlist ) - 1
else:
__snake_case : int = next_state
self.adlist[current_state]["output"].append(UpperCAmelCase )
def UpperCAmelCase ( self ) -> None:
'''simple docstring'''
__snake_case : deque = deque()
for node in self.adlist[0]["next_states"]:
q.append(UpperCAmelCase )
__snake_case : Optional[int] = 0
while q:
__snake_case : str = q.popleft()
for child in self.adlist[r]["next_states"]:
q.append(UpperCAmelCase )
__snake_case : Any = self.adlist[r]["fail_state"]
while (
self.find_next_state(UpperCAmelCase , self.adlist[child]["value"] ) is None
and state != 0
):
__snake_case : str = self.adlist[state]["fail_state"]
__snake_case : Optional[int] = self.find_next_state(
UpperCAmelCase , self.adlist[child]["value"] )
if self.adlist[child]["fail_state"] is None:
__snake_case : List[str] = 0
__snake_case : Optional[Any] = (
self.adlist[child]["output"]
+ self.adlist[self.adlist[child]["fail_state"]]["output"]
)
def UpperCAmelCase ( self , UpperCAmelCase ) -> dict[str, list[int]]:
'''simple docstring'''
__snake_case : dict = {} # returns a dict with keywords and list of its occurrences
__snake_case : Optional[int] = 0
for i in range(len(UpperCAmelCase ) ):
while (
self.find_next_state(UpperCAmelCase , string[i] ) is None
and current_state != 0
):
__snake_case : Optional[Any] = self.adlist[current_state]["fail_state"]
__snake_case : List[str] = self.find_next_state(UpperCAmelCase , string[i] )
if next_state is None:
__snake_case : Union[str, Any] = 0
else:
__snake_case : Any = next_state
for key in self.adlist[current_state]["output"]:
if key not in result:
__snake_case : List[Any] = []
result[key].append(i - len(UpperCAmelCase ) + 1 )
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
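
    # Usage example (added): upstream (TheAlgorithms) names this class Automaton
    # and the query method search_in; both identifiers are obfuscated above.
    # auto = Automaton(["what", "hat", "ver", "er"])
    # print(auto.search_in("whatever, err ... , wherever"))
    # -> {'what': [0], 'hat': [1], 'ver': [5, 25], 'er': [6, 10, 22, 26]}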
| 326 |
def lowerCAmelCase__( lowercase : list[int] , lowercase : int ) -> bool:
__snake_case : List[str] = len(lowercase )
__snake_case : int = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
# for each arr value, a sum of zero(0) can be formed by not taking any element
# hence True/1
for i in range(arr_len + 1 ):
__snake_case : Optional[Any] = True
# sum is not zero and set is empty then false
for i in range(1 , required_sum + 1 ):
__snake_case : Union[str, Any] = False
for i in range(1 , arr_len + 1 ):
for j in range(1 , required_sum + 1 ):
if arr[i - 1] > j:
__snake_case : List[str] = subset[i - 1][j]
if arr[i - 1] <= j:
__snake_case : Union[str, Any] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
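
    # Example (added): the DP above answers classic subset-sum queries; its
    # name is obfuscated in this dump, so is_subset_sum is a stand-in.
    # is_subset_sum([3, 34, 4, 12, 5, 2], 9)   -> True   (4 + 5)
    # is_subset_sum([3, 34, 4, 12, 5, 2], 30)  -> False  (no subset reaches 30)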
| 326 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCamelCase = {
'''configuration_albert''': ['''ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''AlbertConfig''', '''AlbertOnnxConfig'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = ['''AlbertTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = ['''AlbertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
'''ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AlbertForMaskedLM''',
'''AlbertForMultipleChoice''',
'''AlbertForPreTraining''',
'''AlbertForQuestionAnswering''',
'''AlbertForSequenceClassification''',
'''AlbertForTokenClassification''',
'''AlbertModel''',
'''AlbertPreTrainedModel''',
'''load_tf_weights_in_albert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_albert"] = [
'''TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFAlbertForMaskedLM''',
'''TFAlbertForMultipleChoice''',
'''TFAlbertForPreTraining''',
'''TFAlbertForQuestionAnswering''',
'''TFAlbertForSequenceClassification''',
'''TFAlbertForTokenClassification''',
'''TFAlbertMainLayer''',
'''TFAlbertModel''',
'''TFAlbertPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_albert"] = [
'''FlaxAlbertForMaskedLM''',
'''FlaxAlbertForMultipleChoice''',
'''FlaxAlbertForPreTraining''',
'''FlaxAlbertForQuestionAnswering''',
'''FlaxAlbertForSequenceClassification''',
'''FlaxAlbertForTokenClassification''',
'''FlaxAlbertModel''',
'''FlaxAlbertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 326 |
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3
class FailedTestError(RuntimeError):
    pass
def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}
def main() -> None:
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])
    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers
    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))
    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)
    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))
    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")
if __name__ == "__main__":
main()
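# Launch sketch (command line is illustrative, not taken from the original repo):
#     torchrun --nproc_per_node=2 this_script.py --streaming True --num_workers 2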
| 326 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''squeezebert/squeezebert-uncased''': (
'''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'''
),
'''squeezebert/squeezebert-mnli''': '''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt''',
'''squeezebert/squeezebert-mnli-headless''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''squeezebert/squeezebert-uncased''': (
'''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'''
),
'''squeezebert/squeezebert-mnli''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'''
),
'''squeezebert/squeezebert-mnli-headless''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''squeezebert/squeezebert-uncased''': 512,
'''squeezebert/squeezebert-mnli''': 512,
'''squeezebert/squeezebert-mnli-headless''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''squeezebert/squeezebert-uncased''': {'''do_lower_case''': True},
'''squeezebert/squeezebert-mnli''': {'''do_lower_case''': True},
'''squeezebert/squeezebert-mnli-headless''': {'''do_lower_case''': True},
}
class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" SqueezeBERT tokenizer, backed by HuggingFace's tokenizers library."""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer
    def __init__(
        self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]",
        sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]",
        tokenize_chinese_chars=True, strip_accents=None, **kwargs,
    ) -> None:
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # re-create the backend normalizer if any of the normalization options differ
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 326 |
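# Derivation note (added annotation): for the arithmetic progression
# x = y + d, y, z = y - d, the quantity x**2 - y**2 - z**2 simplifies to
# y * (4*d - y), so each solution n factors as first_term * (4*d - first_term);
# positivity of z and of 4*d - y forces d < first_term < 4*d, which is exactly
# what the inner check below enforces.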
def solution(limit: int = 1_000_000) -> int:
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x,y,z are positive integers
                    frequency[n] += 1  # so z>0 and a>d ,also 4d<a
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
if __name__ == "__main__":
print(F'''{solution() = }''')
| 326 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
'''EAGER''',
'''AOT_EAGER''',
'''INDUCTOR''',
'''NVFUSER''',
'''AOT_NVFUSER''',
'''AOT_CUDAGRAPHS''',
'''OFI''',
'''FX2TRT''',
'''ONNXRT''',
'''IPEX''',
]
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)
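# Usage sketch (illustrative, not part of the original module): keep prompting
# until the reply parses as an int, returning the default on empty input:
#     num_processes = _ask_field("How many processes? ", convert_value=int, default=1)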
def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result
def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])
def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])
def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value
def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])
def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])
def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]
class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """A custom formatter that hides the fixed "<command> [<args>]" part of the usage line."""
    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
| 326 |
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """Return every ordered list of words from ``word_bank`` that concatenates to ``target``."""
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])
    # seed value
    table[0] = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now,push that combination to the table[i+len(word)]
                    table[i + len(word)] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()
    return table[len(target)]
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
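# Each call above prints the full list of ordered decompositions of the target
# into bank words; a target that cannot be built yields [].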
| 326 | 1 |
import datasets
_CITATION = '''\
@InProceedings{conneau2018xnli,
author = "Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin",
title = "XNLI: Evaluating Cross-lingual Sentence Representations",
booktitle = "Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing",
year = "2018",
publisher = "Association for Computational Linguistics",
location = "Brussels, Belgium",
}
'''
_DESCRIPTION = '''\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into a 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
'''
_KWARGS_DESCRIPTION = '''
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
\'accuracy\': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric("xnli")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
'''
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Xnli(datasets.Metric):
    """Accuracy metric for the XNLI cross-lingual NLI benchmark."""
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )
    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
| 326 |
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=2, seq_length=56, is_training=True,
        use_attention_mask=True, use_token_type_ids=True, use_labels=True,
        vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=2,
        intermediate_size=7, hidden_act="gelu_new", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02,
        num_choices=4, attention_type="block_sparse", use_bias=True,
        rescale_embeddings=False, block_size=2, num_random_blocks=3,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = BigBirdConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, attention_type=self.attention_type, block_size=self.block_size, num_random_blocks=self.num_random_blocks, use_bias=self.use_bias, rescale_embeddings=self.rescale_embeddings,
        )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )
    test_attn_probs = False
    test_mismatched_shapes = False
    def setUp(self):
        self.model_tester = FlaxBigBirdModelTester(self)
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
        super().test_from_pretrained_save_pretrained()
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
        super().test_from_pretrained_with_no_automatic_init()
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
        super().test_no_automatic_init()
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
        super().test_hidden_states_output()
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/bigbird-roberta-base")
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        if self.test_attn_probs:
            super().test_attention_outputs()
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)
                @jax.jit
                def model_jitted(input_ids, attention_mask=None, **kwargs):
                    return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)
                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
        # flax's block-sparse attention computes attention probs differently from
        # the PyTorch implementation, so attention outputs are not compared
        if name.startswith("outputs.attentions"):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)
| 326 | 1 |
def multiplication_table(number: int, number_of_terms: int) -> str:
    return "\n".join(
        f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1)
    )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
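    # Expected output: ten lines, from "5 * 1 = 5" through "5 * 10 = 50".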
| 326 |
import argparse
import datetime
def zeller(date_input: str) -> str:
    """Return the day of the week for a date given as mm-dd-yyyy or mm/dd/yyyy."""
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")
    # Get month
    m: int = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")
    sep_1: str = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")
    # Get day
    d: int = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")
    # Get second separator
    sep_2: str = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")
    # Get year
    y: int = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            "Year out of range. There has to be some sort of limit...right?"
        )
    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))
    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c: int = int(str(y)[:2])
    k: int = int(str(y)[2:])
    t: int = int(2.6 * m - 5.39)
    u: int = int(c / 4)
    v: int = int(k / 4)
    x: int = int(d + k)
    z: int = int(t + u + v + x)
    w: int = int(z - (2 * c))
    f: int = round(w % 7)
    # End math
    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")
    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response
if __name__ == "__main__":
import doctest
doctest.testmod()
    parser = argparse.ArgumentParser(
description=(
'''Find out what day of the week nearly any date is or was. Enter '''
'''date as a string in the mm-dd-yyyy or mm/dd/yyyy format'''
)
)
parser.add_argument(
'''date_input''', type=str, help='''Date as a string (mm-dd-yyyy or mm/dd/yyyy)'''
)
    args = parser.parse_args()
    print(zeller(args.date_input))
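# Example: passing "01-31-2010" prints "Your date 01-31-2010, is a Sunday!"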
| 326 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
    "configuration_data2vec_text": [
        "DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecTextConfig",
        "Data2VecTextOnnxConfig",
    ],
    "configuration_data2vec_vision": [
        "DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecVisionConfig",
        "Data2VecVisionOnnxConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_data2vec_audio"] = [
'''DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecAudioForAudioFrameClassification''',
'''Data2VecAudioForCTC''',
'''Data2VecAudioForSequenceClassification''',
'''Data2VecAudioForXVector''',
'''Data2VecAudioModel''',
'''Data2VecAudioPreTrainedModel''',
]
    _import_structure["modeling_data2vec_text"] = [
'''DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecTextForCausalLM''',
'''Data2VecTextForMaskedLM''',
'''Data2VecTextForMultipleChoice''',
'''Data2VecTextForQuestionAnswering''',
'''Data2VecTextForSequenceClassification''',
'''Data2VecTextForTokenClassification''',
'''Data2VecTextModel''',
'''Data2VecTextPreTrainedModel''',
]
    _import_structure["modeling_data2vec_vision"] = [
'''DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecVisionForImageClassification''',
'''Data2VecVisionForMaskedImageModeling''',
'''Data2VecVisionForSemanticSegmentation''',
'''Data2VecVisionModel''',
'''Data2VecVisionPreTrainedModel''',
]
if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
'''TFData2VecVisionForImageClassification''',
'''TFData2VecVisionForSemanticSegmentation''',
'''TFData2VecVisionModel''',
'''TFData2VecVisionPreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 326 |
def combination_util(arr, n, r, index, data, i):
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def print_combination(arr, n, r):
    # A temporary array to store all combination one by one
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)
if __name__ == "__main__":
# Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
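# With arr of length 5 and r = 3 this prints all C(5, 3) = 10 combinations.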
| 326 | 1 |
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class BertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertTokenizer
    rust_tokenizer_class = BertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "UNwant\u00E9d,running"
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)
        sequence = "UNwant\u00E9d,running"
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])
    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])
    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )
    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )
    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )
    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )
    def test_basic_tokenizer_splits_on_punctuation(self):
        tokenizer = BasicTokenizer()
        text = "a\n'll !!to?'d of, can't."
        expected = ["a", "'", "ll", "!", "!", "to", "?", "'", "d", "of", ",", "can", "'", "t", "."]
        self.assertListEqual(tokenizer.tokenize(text), expected)
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))
        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))
    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))
        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))
    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))
        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("bert-base-uncased")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)
                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
| 326 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
rename_keys_prefix = [
('''bert.bert''', '''visual_bert'''),
('''bert.cls''', '''cls'''),
('''bert.classifier''', '''cls'''),
('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''),
('''position_embeddings_visual''', '''visual_position_embeddings'''),
('''projection''', '''visual_projection'''),
]
ACCEPTABLE_CHECKPOINTS = [
'''nlvr2_coco_pre_trained.th''',
'''nlvr2_fine_tuned.th''',
'''nlvr2_pre_trained.th''',
'''vcr_coco_pre_train.th''',
'''vcr_fine_tune.th''',
'''vcr_pre_train.th''',
'''vqa_coco_pre_trained.th''',
'''vqa_fine_tuned.th''',
'''vqa_pre_trained.th''',
]
def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd
def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."
    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"
    config = VisualBertConfig(**config_params)
    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)
    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)
    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
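# Invocation sketch (the checkpoint name must come from ACCEPTABLE_CHECKPOINTS
# above; the script and folder names below are illustrative):
#     python convert_visual_bert_checkpoint.py vqa_fine_tuned.th ./visualbert-vqa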
| 326 | 1 |
def solution(limit: int = 1_000_000) -> int:
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x,y,z are positive integers
                    frequency[n] += 1  # so z>0 and a>d ,also 4d<a
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
if __name__ == "__main__":
print(F'''{solution() = }''')
| 326 |
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])
    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]
    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1
    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])
    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2
    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)
    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])
    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]
    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])
    model = LukeForMaskedLM(config=config).eval()
    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            state_dict_for_hugging_face[f"luke.{key}"] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]
    missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face, strict=False)
    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")
    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")
    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")
    outputs = model(**encoding)
    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor(
            [[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]]
        )
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError
    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError
    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")
    outputs = model(**encoding)
    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)
    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"
    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]
    data = [json.loads(line) for line in open(entity_vocab_path)]
    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
_UpperCamelCase = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 326 | 1 |
def lowerCAmelCase__( lowercase : list[int] , lowercase : int ) -> bool:
__snake_case : List[str] = len(lowercase )
__snake_case : int = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
    # a sum of zero can always be formed by taking no elements, so the first
    # column of every row is True
for i in range(arr_len + 1 ):
__snake_case : Optional[Any] = True
    # a non-zero sum can never be formed from the empty prefix, hence False
for i in range(1 , required_sum + 1 ):
__snake_case : Union[str, Any] = False
    for i in range(1 , arr_len + 1 ):
        for j in range(1 , required_sum + 1 ):
            if arr[i - 1] > j:
                # the current element is larger than the target sum j: carry
                # the answer over from the previous row
                __snake_case : List[str] = subset[i - 1][j]
            if arr[i - 1] <= j:
                # either skip the element, or take it and look up the remainder
                __snake_case : Union[str, Any] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
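# A compact, self-contained reference for the same bottom-up DP (a sketch for
# clarity; the function above fills an equivalent 2-D table, just with masked
# assignment targets such as ``arr_len`` and ``subset``):
def _is_sum_subset_reference(arr, required_sum):
    # dp[j] is True iff some subset of the elements seen so far sums to j
    dp = [False] * (required_sum + 1)
    dp[0] = True  # the empty subset always sums to zero
    for value in arr:
        # sweep j from high to low so each element is used at most once
        for j in range(required_sum, value - 1, -1):
            dp[j] = dp[j] or dp[j - value]
    return dp[required_sum]


assert _is_sum_subset_reference([3, 34, 4, 12, 5, 2], 9) is True  # 4 + 5
assert _is_sum_subset_reference([3, 34, 4, 12, 5, 2], 30) is False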
if __name__ == "__main__":
import doctest
doctest.testmod()
| 326 |
from maths.prime_factors import prime_factors
def lowerCAmelCase__( lowercase : int ) -> int:
if not isinstance(lowercase , lowercase ):
__snake_case : Optional[int] = f"""Input value of [number={number}] must be an integer"""
raise TypeError(lowercase )
if number < 1:
raise ValueError("Input must be a positive integer" )
return -1 if len(prime_factors(lowercase ) ) % 2 else 1
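# A self-contained sketch of the same quantity (the error messages above match
# the Liouville lambda function: (-1) ** Omega(n), where Omega counts prime
# factors WITH multiplicity; assumes number >= 1):
def _liouville_lambda_reference(number):
    omega, n, p = 0, number, 2
    while p * p <= n:
        while n % p == 0:  # count each prime factor with multiplicity
            n //= p
            omega += 1
        p += 1
    if n > 1:  # one leftover prime factor remains
        omega += 1
    return -1 if omega % 2 else 1


assert [_liouville_lambda_reference(n) for n in (1, 2, 4, 9, 10)] == [1, -1, 1, 1, 1]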
if __name__ == "__main__":
import doctest
doctest.testmod()
| 326 | 1 |
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
'''facebook/detr-resnet-50''': '''https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json''',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class _lowerCamelCase ( a ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] ="detr"
UpperCAmelCase_ : Optional[int] =["past_key_values"]
UpperCAmelCase_ : Optional[int] ={
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , UpperCAmelCase=True , UpperCAmelCase=None , UpperCAmelCase=3 , UpperCAmelCase=100 , UpperCAmelCase=6 , UpperCAmelCase=2048 , UpperCAmelCase=8 , UpperCAmelCase=6 , UpperCAmelCase=2048 , UpperCAmelCase=8 , UpperCAmelCase=0.0 , UpperCAmelCase=0.0 , UpperCAmelCase=True , UpperCAmelCase="relu" , UpperCAmelCase=256 , UpperCAmelCase=0.1 , UpperCAmelCase=0.0 , UpperCAmelCase=0.0 , UpperCAmelCase=0.02 , UpperCAmelCase=1.0 , UpperCAmelCase=False , UpperCAmelCase="sine" , UpperCAmelCase="resnet50" , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase=1 , UpperCAmelCase=5 , UpperCAmelCase=2 , UpperCAmelCase=1 , UpperCAmelCase=1 , UpperCAmelCase=5 , UpperCAmelCase=2 , UpperCAmelCase=0.1 , **UpperCAmelCase , ) -> List[Any]:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
__snake_case : Optional[int] = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(UpperCAmelCase , UpperCAmelCase ):
__snake_case : Dict = backbone_config.get("model_type" )
__snake_case : Any = CONFIG_MAPPING[backbone_model_type]
__snake_case : Tuple = config_class.from_dict(UpperCAmelCase )
# set timm attributes to None
__snake_case , __snake_case , __snake_case : Optional[Any] = None, None, None
__snake_case : Dict = use_timm_backbone
__snake_case : int = backbone_config
__snake_case : int = num_channels
__snake_case : List[Any] = num_queries
__snake_case : List[Any] = d_model
__snake_case : Tuple = encoder_ffn_dim
__snake_case : List[Any] = encoder_layers
__snake_case : Any = encoder_attention_heads
__snake_case : List[str] = decoder_ffn_dim
__snake_case : Optional[Any] = decoder_layers
__snake_case : List[str] = decoder_attention_heads
__snake_case : Optional[int] = dropout
__snake_case : List[str] = attention_dropout
__snake_case : Optional[int] = activation_dropout
__snake_case : Any = activation_function
__snake_case : Optional[Any] = init_std
__snake_case : Union[str, Any] = init_xavier_std
__snake_case : Tuple = encoder_layerdrop
__snake_case : str = decoder_layerdrop
__snake_case : Dict = encoder_layers
__snake_case : Optional[int] = auxiliary_loss
__snake_case : Tuple = position_embedding_type
__snake_case : Tuple = backbone
__snake_case : int = use_pretrained_backbone
__snake_case : List[str] = dilation
# Hungarian matcher
__snake_case : str = class_cost
__snake_case : Tuple = bbox_cost
__snake_case : Union[str, Any] = giou_cost
# Loss coefficients
__snake_case : List[str] = mask_loss_coefficient
__snake_case : List[Any] = dice_loss_coefficient
__snake_case : str = bbox_loss_coefficient
__snake_case : Any = giou_loss_coefficient
__snake_case : List[str] = eos_coefficient
super().__init__(is_encoder_decoder=UpperCAmelCase , **UpperCAmelCase )
@property
def UpperCAmelCase ( self ) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def UpperCAmelCase ( self ) -> int:
'''simple docstring'''
return self.d_model
@classmethod
def UpperCAmelCase ( cls , UpperCAmelCase , **UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
return cls(backbone_config=UpperCAmelCase , **UpperCAmelCase )
def UpperCAmelCase ( self ) -> Dict[str, any]:
'''simple docstring'''
__snake_case : str = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
__snake_case : Tuple = self.backbone_config.to_dict()
__snake_case : int = self.__class__.model_type
return output
class _lowerCamelCase ( a ):
"""simple docstring"""
UpperCAmelCase_ : Dict =version.parse("1.11" )
@property
def UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def UpperCAmelCase ( self ) -> float:
'''simple docstring'''
return 1E-5
@property
def UpperCAmelCase ( self ) -> int:
'''simple docstring'''
return 12
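# A minimal configuration sketch (illustrative; the ``model_type = "detr"``
# above indicates this is transformers' ``DetrConfig``):
def _example_tiny_detr_config():
    from transformers import DetrConfig

    # a deliberately small configuration, handy for unit tests and smoke runs
    config = DetrConfig(
        num_queries=10,          # object queries = max detections per image
        d_model=64,              # aliased to "hidden_size" via attribute_map
        encoder_layers=2,
        decoder_layers=2,
        encoder_attention_heads=4,
        decoder_attention_heads=4,
    )
    return config.num_attention_heads  # 4, aliased to encoder_attention_heads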
| 326 |
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
__snake_case : Tuple = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small" )
__snake_case : str = AutoTokenizer.from_pretrained("google/mt5-small" )
__snake_case : List[Any] = tokenizer("Hello there" , return_tensors="np" ).input_ids
__snake_case : int = tokenizer("Hi I am" , return_tensors="np" ).input_ids
__snake_case : Tuple = shift_tokens_right(UpperCAmelCase , model.config.pad_token_id , model.config.decoder_start_token_id )
__snake_case : Tuple = model(UpperCAmelCase , decoder_input_ids=UpperCAmelCase ).logits
__snake_case : str = optax.softmax_cross_entropy(UpperCAmelCase , onehot(UpperCAmelCase , logits.shape[-1] ) ).mean()
__snake_case : Any = -(labels.shape[-1] * loss.item())
__snake_case : List[str] = -84.9_127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
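# A tiny standalone sketch of the loss computation exercised above
# (illustrative): ``optax.softmax_cross_entropy`` takes logits and one-hot
# targets with matching trailing dimensions and returns a per-position loss.
def _example_token_level_cross_entropy():
    import jax.numpy as jnp

    logits = jnp.zeros((1, 3, 5))    # (batch, seq_len, vocab): uniform logits
    labels = jnp.array([[0, 2, 4]])  # (batch, seq_len)
    loss = optax.softmax_cross_entropy(logits, onehot(labels, 5)).mean()
    return float(loss)  # log(5) ~= 1.609 for uniform logits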
| 326 | 1 |
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def lowerCAmelCase__( lowercase : Any ) -> Dict:
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class _lowerCamelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase ) -> Any:
'''simple docstring'''
super().__init__()
__snake_case : List[str] = module
__snake_case : str = nn.Sequential(
nn.Linear(module.in_features , UpperCAmelCase , bias=UpperCAmelCase ) , nn.Linear(UpperCAmelCase , module.out_features , bias=UpperCAmelCase ) , )
__snake_case : List[Any] = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=UpperCAmelCase )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def UpperCAmelCase ( self , UpperCAmelCase , *UpperCAmelCase , **UpperCAmelCase ) -> int:
'''simple docstring'''
return self.module(UpperCAmelCase , *UpperCAmelCase , **UpperCAmelCase ) + self.adapter(UpperCAmelCase )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] ="bigscience/bloom-1b7"
# Constant values
UpperCAmelCase_ : Dict =2.109_6595_5269_2574
UpperCAmelCase_ : Tuple ="Hello my name is"
UpperCAmelCase_ : Union[str, Any] =set()
EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I" )
EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" )
EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" )
UpperCAmelCase_ : Union[str, Any] =10
def UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
__snake_case : Union[str, Any] = AutoTokenizer.from_pretrained(self.model_name )
class _lowerCamelCase ( a ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
# Models and tokenizer
__snake_case : Tuple = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map="auto" )
__snake_case : Union[str, Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=UpperCAmelCase , device_map="auto" )
def UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
__snake_case : Tuple = self.model_abit.config
self.assertTrue(hasattr(UpperCAmelCase , "quantization_config" ) )
__snake_case : Dict = config.to_dict()
__snake_case : int = config.to_diff_dict()
__snake_case : List[str] = config.to_json_string()
def UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
__snake_case : Tuple = self.model_fpaa.get_memory_footprint()
__snake_case : Optional[Any] = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
__snake_case : Any = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(UpperCAmelCase , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case : Union[str, Any] = self.tokenizer(self.input_text , return_tensors="pt" )
__snake_case : Union[str, Any] = self.model_abit.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=UpperCAmelCase ) , self.EXPECTED_OUTPUTS )
def UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case : Tuple = BitsAndBytesConfig()
__snake_case : Any = True
__snake_case : Optional[Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=UpperCAmelCase , device_map="auto" )
__snake_case : str = self.tokenizer(self.input_text , return_tensors="pt" )
__snake_case : str = model_abit_from_config.generate(
input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=UpperCAmelCase ) , self.EXPECTED_OUTPUTS )
def UpperCAmelCase ( self ) -> str:
'''simple docstring'''
with self.assertRaises(UpperCAmelCase ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(UpperCAmelCase )
def UpperCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case : Dict = BitsAndBytesConfig()
with self.assertRaises(UpperCAmelCase ):
__snake_case : str = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=UpperCAmelCase , load_in_abit=UpperCAmelCase , device_map="auto" , bnb_abit_quant_type="nf4" , )
def UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
with self.assertRaises(UpperCAmelCase ):
# Tries with `str`
self.model_abit.to("cpu" )
with self.assertRaises(UpperCAmelCase ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(UpperCAmelCase ):
# Tries with a `device`
self.model_abit.to(torch.device("cuda:0" ) )
with self.assertRaises(UpperCAmelCase ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(UpperCAmelCase ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
__snake_case : str = self.tokenizer(self.input_text , return_tensors="pt" )
__snake_case : int = self.model_fpaa.to(torch.floataa )
__snake_case : Optional[Any] = self.model_fpaa.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
__snake_case : Dict = self.model_fpaa.to("cpu" )
# Check this does not throw an error
__snake_case : Dict = self.model_fpaa.half()
# Check this does not throw an error
__snake_case : List[str] = self.model_fpaa.float()
def UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case : List[str] = AutoModelForSeqaSeqLM.from_pretrained("t5-small" , load_in_abit=UpperCAmelCase , device_map="auto" )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def UpperCAmelCase ( cls ) -> int:
'''simple docstring'''
__snake_case : Dict = "t5-small"
__snake_case : Tuple = "google/flan-t5-small" # flan-t5 uses dense-act instead of dense-relu-dense
__snake_case : Union[str, Any] = AutoTokenizer.from_pretrained(cls.model_name )
__snake_case : Optional[Any] = "Translate in German: Hello, my dog is cute"
def UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
from transformers import TaForConditionalGeneration
__snake_case : str = TaForConditionalGeneration._keep_in_fpaa_modules
__snake_case : str = None
# test with `t5-small`
__snake_case : List[Any] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=UpperCAmelCase , device_map="auto" )
__snake_case : List[Any] = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
__snake_case : int = model.generate(**UpperCAmelCase )
# test with `flan-t5-small`
__snake_case : Optional[Any] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=UpperCAmelCase , device_map="auto" )
__snake_case : Tuple = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
__snake_case : Optional[int] = model.generate(**UpperCAmelCase )
__snake_case : int = modules
def UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
__snake_case : Union[str, Any] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=UpperCAmelCase , device_map="auto" )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
__snake_case : List[str] = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
__snake_case : Union[str, Any] = model.generate(**UpperCAmelCase )
# test with `flan-t5-small`
__snake_case : int = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=UpperCAmelCase , device_map="auto" )
__snake_case : List[Any] = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
__snake_case : int = model.generate(**UpperCAmelCase )
class _lowerCamelCase ( a ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
super().setUp()
# model_name
__snake_case : List[Any] = "bigscience/bloom-560m"
__snake_case : List[Any] = "t5-small"
# Different types of model
__snake_case : Any = AutoModel.from_pretrained(self.model_name , load_in_abit=UpperCAmelCase , device_map="auto" )
# Sequence classification model
__snake_case : List[Any] = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=UpperCAmelCase , device_map="auto" )
# CausalLM model
__snake_case : str = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=UpperCAmelCase , device_map="auto" )
# Seq2seq model
__snake_case : Dict = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=UpperCAmelCase , device_map="auto" )
def UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase ( self ) -> int:
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class _lowerCamelCase ( a ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> str:
'''simple docstring'''
super().setUp()
def UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase ( self ) -> str:
'''simple docstring'''
__snake_case : List[str] = pipeline(
"text-generation" , model=self.model_name , model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
__snake_case : List[str] = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]["generated_text"] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class _lowerCamelCase ( a ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
super().setUp()
def UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : str = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=UpperCAmelCase , device_map="balanced" )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
__snake_case : Dict = self.tokenizer(self.input_text , return_tensors="pt" )
# Second real batch
__snake_case : Optional[int] = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=UpperCAmelCase ) , self.EXPECTED_OUTPUTS )
class _lowerCamelCase ( a ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : Dict = "facebook/opt-350m"
super().setUp()
def UpperCAmelCase ( self ) -> str:
'''simple docstring'''
if version.parse(importlib.metadata.version("bitsandbytes" ) ) < version.parse("0.37.0" ):
return
# Step 1: freeze all parameters
__snake_case : Dict = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=UpperCAmelCase )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
__snake_case : Any = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
__snake_case : List[Any] = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(UpperCAmelCase ) ):
__snake_case : Optional[int] = LoRALayer(module.q_proj , rank=16 )
__snake_case : str = LoRALayer(module.k_proj , rank=16 )
__snake_case : List[Any] = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
__snake_case : Union[str, Any] = self.tokenizer("Test batch " , return_tensors="pt" ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
__snake_case : Optional[Any] = model.forward(**UpperCAmelCase )
out.logits.norm().backward()
for module in model.modules():
if isinstance(UpperCAmelCase , UpperCAmelCase ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(UpperCAmelCase , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class _lowerCamelCase ( a ):
"""simple docstring"""
UpperCAmelCase_ : List[str] ="gpt2-xl"
UpperCAmelCase_ : Optional[Any] =3.3191_8548_5415_2187
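# A minimal end-user sketch of the workflow these tests exercise
# (illustrative; "bigscience/bloom-560m" is just an example checkpoint, and a
# CUDA device plus the bitsandbytes package are assumed to be available):
def _example_load_model_in_4bit():
    quant_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4")
    model = AutoModelForCausalLM.from_pretrained(
        "bigscience/bloom-560m",
        quantization_config=quant_config,
        device_map="auto",
    )
    return model.get_memory_footprint()  # well below the fp16 footprint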
| 326 |
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class _lowerCamelCase ( a ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase=768 ) -> List[str]:
'''simple docstring'''
super().__init__(UpperCAmelCase )
__snake_case : Optional[int] = proj_size
__snake_case : str = CLIPVisionModel(UpperCAmelCase )
__snake_case : Tuple = PaintByExampleMapper(UpperCAmelCase )
__snake_case : Union[str, Any] = nn.LayerNorm(config.hidden_size )
__snake_case : Optional[Any] = nn.Linear(config.hidden_size , self.proj_size )
# uncondition for scaling
__snake_case : Optional[int] = nn.Parameter(torch.randn((1, 1, self.proj_size) ) )
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase=False ) -> List[str]:
'''simple docstring'''
__snake_case : int = self.model(pixel_values=UpperCAmelCase )
__snake_case : Optional[int] = clip_output.pooler_output
__snake_case : Any = self.mapper(latent_states[:, None] )
__snake_case : Any = self.final_layer_norm(UpperCAmelCase )
__snake_case : str = self.proj_out(UpperCAmelCase )
if return_uncond_vector:
return latent_states, self.uncond_vector
return latent_states
class _lowerCamelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
super().__init__()
__snake_case : List[Any] = (config.num_hidden_layers + 1) // 5
__snake_case : Dict = config.hidden_size
__snake_case : str = 1
__snake_case : List[Any] = nn.ModuleList(
[
BasicTransformerBlock(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , activation_fn="gelu" , attention_bias=UpperCAmelCase )
for _ in range(UpperCAmelCase )
] )
def UpperCAmelCase ( self , UpperCAmelCase ) -> str:
'''simple docstring'''
for block in self.blocks:
__snake_case : int = block(UpperCAmelCase )
return hidden_states
| 326 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 326 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 326 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_UpperCamelCase = {
'''configuration_gpt_neo''': ['''GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoConfig''', '''GPTNeoOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
'''GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoForCausalLM''',
'''GPTNeoForQuestionAnswering''',
'''GPTNeoForSequenceClassification''',
'''GPTNeoForTokenClassification''',
'''GPTNeoModel''',
'''GPTNeoPreTrainedModel''',
'''load_tf_weights_in_gpt_neo''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
'''FlaxGPTNeoForCausalLM''',
'''FlaxGPTNeoModel''',
'''FlaxGPTNeoPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
_UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 326 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = torch.device('''cpu''')
def lowerCAmelCase__( ) -> Any:
__snake_case : List[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
__snake_case : Optional[int] = Image.open(requests.get(lowercase , stream=lowercase ).raw )
return im
def lowerCAmelCase__( lowercase : Dict ) -> List[Any]:
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1_703E00, 2.1_107E00, -2.0_811E00, 8.8_685E-01, 2.4_360E-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9_636E-01, 2.3_478E-01, -1.6_963E00, -1.7_381E00, -8.6_337E-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2_768E-01, -4.7_429E-01, -1.0_897E00, -1.0_248E00, 3.5_523E-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5_330E-01, 2.4_211E-01, -6.0_185E-01, -8.2_789E-01, -6.0_446E-02] )
def lowerCAmelCase__( lowercase : Tuple , lowercase : Union[str, Any] , lowercase : Union[str, Any] ) -> List[Any]:
__snake_case : List[Any] = dct.pop(lowercase )
__snake_case : List[Any] = val
def lowerCAmelCase__( lowercase : Union[str, Any] ) -> Tuple:
__snake_case : Optional[Any] = []
for k in state_dict.keys():
__snake_case : Union[str, Any] = k
if ".pwconv" in k:
__snake_case : Any = k_new.replace(".pwconv" , ".point_wise_conv" )
if ".dwconv" in k:
__snake_case : List[Any] = k_new.replace(".dwconv" , ".depth_wise_conv" )
if ".Proj." in k:
__snake_case : Optional[int] = k_new.replace(".Proj." , ".proj." )
if "patch_embed" in k_new:
__snake_case : int = k_new.replace("patch_embed" , "swiftformer.patch_embed.patch_embedding" )
if "network" in k_new:
__snake_case : int = k_new.split("." )
if ls[2].isdigit():
__snake_case : List[Any] = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:] )
else:
__snake_case : Optional[int] = k_new.replace("network" , "swiftformer.encoder.network" )
rename_keys.append((k, k_new) )
return rename_keys
@torch.no_grad()
def lowerCAmelCase__( lowercase : List[Any] , lowercase : Optional[Any] , lowercase : List[str] ) -> Union[str, Any]:
__snake_case : List[str] = SwiftFormerConfig()
    # ImageNet-1k classification head: number of labels and the id2label mapping
__snake_case : Tuple = 1000
__snake_case : Any = "huggingface/label-files"
__snake_case : int = "imagenet-1k-id2label.json"
__snake_case : Dict = json.load(open(hf_hub_download(lowercase , lowercase , repo_type="dataset" ) , "r" ) )
__snake_case : str = {int(lowercase ): v for k, v in idalabel.items()}
__snake_case : int = idalabel
__snake_case : Optional[int] = {v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
__snake_case : Optional[Any] = [3, 3, 6, 4]
__snake_case : Optional[int] = [48, 56, 112, 220]
elif swiftformer_name == "swiftformer_s":
__snake_case : List[str] = [3, 3, 9, 6]
__snake_case : Optional[Any] = [48, 64, 168, 224]
elif swiftformer_name == "swiftformer_l1":
__snake_case : Optional[int] = [4, 3, 10, 5]
__snake_case : Dict = [48, 96, 192, 384]
elif swiftformer_name == "swiftformer_l3":
__snake_case : str = [4, 4, 12, 6]
__snake_case : Optional[Any] = [64, 128, 320, 512]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith("https" ):
__snake_case : Optional[Any] = torch.hub.load_state_dict_from_url(lowercase , map_location="cpu" , check_hash=lowercase )
else:
__snake_case : Tuple = torch.load(lowercase , map_location="cpu" )
__snake_case : Optional[int] = checkpoint
__snake_case : Any = create_rename_keys(lowercase )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(lowercase , lowercase , lowercase )
# load HuggingFace model
__snake_case : Tuple = SwiftFormerForImageClassification(lowercase ).eval()
hf_model.load_state_dict(lowercase )
# prepare test inputs
__snake_case : Optional[Any] = prepare_img()
__snake_case : str = ViTImageProcessor.from_pretrained("preprocessor_config" )
__snake_case : Optional[int] = processor(images=lowercase , return_tensors="pt" )
# compare outputs from both models
__snake_case : str = get_expected_output(lowercase )
__snake_case : Optional[int] = hf_model(inputs["pixel_values"] ).logits
assert hf_logits.shape == torch.Size([1, 1000] )
assert torch.allclose(hf_logits[0, 0:5] , lowercase , atol=1E-3 )
Path(lowercase ).mkdir(exist_ok=lowercase )
print(f"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" )
hf_model.save_pretrained(lowercase )
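# A quick smoke-test sketch for a converted checkpoint (illustrative; the
# folder matches the default --pytorch_dump_folder_path below, the bare
# ViTImageProcessor() preprocessing is an assumption, and ``prepare_img``
# follows this file's call-site naming):
def _example_verify_converted_swiftformer():
    model = SwiftFormerForImageClassification.from_pretrained("./converted_outputs/").eval()
    processor = ViTImageProcessor()
    inputs = processor(images=prepare_img(), return_tensors="pt")
    logits = model(inputs["pixel_values"]).logits
    return int(logits.argmax(-1))  # predicted ImageNet-1k class id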
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swiftformer_name''',
default='''swiftformer_xs''',
choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''],
type=str,
help='''Name of the SwiftFormer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''./converted_outputs/''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''')
_UpperCamelCase = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 326 | 1 |
import argparse
import datetime
def lowerCAmelCase__( lowercase : str ) -> str:
__snake_case : int = {
"0": "Sunday",
"1": "Monday",
"2": "Tuesday",
"3": "Wednesday",
"4": "Thursday",
"5": "Friday",
"6": "Saturday",
}
__snake_case : int = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
# Validate
if not 0 < len(lowercase ) < 11:
raise ValueError("Must be 10 characters long" )
# Get month
__snake_case : int = int(date_input[0] + date_input[1] )
# Validate
if not 0 < m < 13:
raise ValueError("Month must be between 1 - 12" )
__snake_case : str = date_input[2]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError("Date separator must be '-' or '/'" )
# Get day
__snake_case : int = int(date_input[3] + date_input[4] )
# Validate
if not 0 < d < 32:
raise ValueError("Date must be between 1 - 31" )
# Get second separator
__snake_case : str = date_input[5]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError("Date separator must be '-' or '/'" )
# Get year
__snake_case : int = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] )
# Arbitrary year range
if not 45 < y < 8500:
raise ValueError(
"Year out of range. There has to be some sort of limit...right?" )
# Get datetime obj for validation
__snake_case : str = datetime.date(int(lowercase ) , int(lowercase ) , int(lowercase ) )
# Start math
if m <= 2:
__snake_case : Optional[Any] = y - 1
__snake_case : Tuple = m + 12
# maths var
__snake_case : int = int(str(lowercase )[:2] )
__snake_case : int = int(str(lowercase )[2:] )
__snake_case : int = int(2.6 * m - 5.3_9 )
__snake_case : int = int(c / 4 )
__snake_case : int = int(k / 4 )
__snake_case : int = int(d + k )
__snake_case : int = int(t + u + v + x )
__snake_case : int = int(z - (2 * c) )
__snake_case : int = round(w % 7 )
# End math
# Validate math
if f != convert_datetime_days[dt_ck.weekday()]:
raise AssertionError("The date was evaluated incorrectly. Contact developer." )
# Response
__snake_case : str = f"""Your date {date_input}, is a {days[str(lowercase )]}!"""
return response
if __name__ == "__main__":
import doctest
doctest.testmod()
_UpperCamelCase = argparse.ArgumentParser(
description=(
'''Find out what day of the week nearly any date is or was. Enter '''
'''date as a string in the mm-dd-yyyy or mm/dd/yyyy format'''
)
)
parser.add_argument(
'''date_input''', type=str, help='''Date as a string (mm-dd-yyyy or mm/dd/yyyy)'''
)
_UpperCamelCase = parser.parse_args()
zeller(args.date_input)
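# A compact standalone rendering of the same congruence (a sketch for clarity;
# ``datetime`` remains the ground truth the function above validates against):
def _weekday_via_zeller(month, day, year):
    if month <= 2:  # January/February count as months 13/14 of the previous year
        year -= 1
        month += 12
    century, year_of_century = divmod(year, 100)
    w = (
        day
        + int(2.6 * month - 5.39)
        + year_of_century
        + year_of_century // 4
        + century // 4
        - 2 * century
    )
    return w % 7  # 0 = Sunday, 1 = Monday, ..., 6 = Saturday


assert _weekday_via_zeller(1, 1, 2000) == 6  # January 1st, 2000 was a Saturday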
| 326 |
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
_UpperCamelCase = logging.getLogger(__name__)
def lowerCAmelCase__( lowercase : str ) -> List[str]:
__snake_case : int = git.Repo(search_parent_directories=lowercase )
__snake_case : Union[str, Any] = {
"repo_id": str(lowercase ),
"repo_sha": str(repo.head.object.hexsha ),
"repo_branch": str(repo.active_branch ),
}
with open(os.path.join(lowercase , "git_log.json" ) , "w" ) as f:
json.dump(lowercase , lowercase , indent=4 )
def lowerCAmelCase__( lowercase : Optional[Any] ) -> Optional[Any]:
if params.n_gpu <= 0:
__snake_case : Union[str, Any] = 0
__snake_case : Optional[int] = -1
__snake_case : Union[str, Any] = True
__snake_case : Tuple = False
return
assert torch.cuda.is_available()
logger.info("Initializing GPUs" )
if params.n_gpu > 1:
assert params.local_rank != -1
__snake_case : Optional[int] = int(os.environ["WORLD_SIZE"] )
__snake_case : int = int(os.environ["N_GPU_NODE"] )
__snake_case : Union[str, Any] = int(os.environ["RANK"] )
# number of nodes / node ID
__snake_case : Optional[Any] = params.world_size // params.n_gpu_per_node
__snake_case : Optional[Any] = params.global_rank // params.n_gpu_per_node
__snake_case : Union[str, Any] = True
assert params.n_nodes == int(os.environ["N_NODES"] )
assert params.node_id == int(os.environ["NODE_RANK"] )
# local job (single GPU)
else:
assert params.local_rank == -1
__snake_case : Any = 1
__snake_case : str = 0
__snake_case : Optional[Any] = 0
__snake_case : Dict = 0
__snake_case : int = 1
__snake_case : Optional[Any] = 1
__snake_case : Tuple = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
__snake_case : List[Any] = params.node_id == 0 and params.local_rank == 0
__snake_case : List[Any] = params.n_nodes > 1
# summary
__snake_case : List[Any] = f"""--- Global rank: {params.global_rank} - """
logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes )
logger.info(PREFIX + "Node ID : %i" % params.node_id )
logger.info(PREFIX + "Local rank : %i" % params.local_rank )
logger.info(PREFIX + "World size : %i" % params.world_size )
logger.info(PREFIX + "GPUs per node : %i" % params.n_gpu_per_node )
logger.info(PREFIX + "Master : %s" % str(params.is_master ) )
logger.info(PREFIX + "Multi-node : %s" % str(params.multi_node ) )
logger.info(PREFIX + "Multi-GPU : %s" % str(params.multi_gpu ) )
logger.info(PREFIX + "Hostname : %s" % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info("Initializing PyTorch distributed" )
torch.distributed.init_process_group(
init_method="env://" , backend="nccl" , )
def lowerCAmelCase__( lowercase : Dict ) -> Union[str, Any]:
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
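# A minimal single-GPU setup sketch (illustrative; ``init_gpu_params`` and
# ``set_seed`` are assumed to be the names behind the masked defs above, and a
# visible CUDA device is required once n_gpu > 0):
def _example_single_gpu_setup():
    from types import SimpleNamespace

    params = SimpleNamespace(n_gpu=1, local_rank=-1, seed=42)
    init_gpu_params(params)  # fills n_nodes / node_id / world_size / ... for a local job
    set_seed(params)         # seeds numpy, torch and CUDA
    return params.is_master  # True: the single local process is the master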
| 326 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 326 |
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ : str =JukeboxTokenizer
UpperCAmelCase_ : Tuple ={
"artist": "Zac Brown Band",
"genres": "Country",
"lyrics": "I met a traveller from an antique land,\n Who said \"Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ",
}
@require_torch
def UpperCAmelCase ( self ) -> str:
'''simple docstring'''
import torch
__snake_case : List[str] = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics" )
__snake_case : Union[str, Any] = tokenizer(**self.metas )["input_ids"]
# fmt: off
__snake_case : Optional[Any] = [
torch.tensor([[
0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
def UpperCAmelCase ( self ) -> str:
'''simple docstring'''
import torch
__snake_case : Optional[Any] = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics" )
__snake_case : Tuple = tokenizer(**self.metas )["input_ids"]
# fmt: off
__snake_case : int = [
torch.tensor([[
0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 326 | 1 |
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class _lowerCamelCase ( a ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case : Union[str, Any] = tempfile.mkdtemp()
__snake_case : Tuple = 8
# DPR tok
__snake_case : int = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
__snake_case : str = os.path.join(self.tmpdirname , "dpr_tokenizer" )
os.makedirs(UpperCAmelCase , exist_ok=UpperCAmelCase )
__snake_case : str = os.path.join(UpperCAmelCase , DPR_VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
# BART tok
__snake_case : str = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
__snake_case : Any = dict(zip(UpperCAmelCase , range(len(UpperCAmelCase ) ) ) )
__snake_case : Any = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
__snake_case : List[str] = {"unk_token": "<unk>"}
__snake_case : Optional[Any] = os.path.join(self.tmpdirname , "bart_tokenizer" )
os.makedirs(UpperCAmelCase , exist_ok=UpperCAmelCase )
__snake_case : List[Any] = os.path.join(UpperCAmelCase , BART_VOCAB_FILES_NAMES["vocab_file"] )
__snake_case : Dict = os.path.join(UpperCAmelCase , BART_VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(UpperCAmelCase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(UpperCAmelCase ) )
def UpperCAmelCase ( self ) -> DPRQuestionEncoderTokenizer:
'''simple docstring'''
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )
def UpperCAmelCase ( self ) -> BartTokenizer:
'''simple docstring'''
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer" ) )
def UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
@require_tokenizers
def UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
__snake_case : List[Any] = os.path.join(self.tmpdirname , "rag_tokenizer" )
__snake_case : Any = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
__snake_case : Tuple = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
rag_config.save_pretrained(UpperCAmelCase )
rag_tokenizer.save_pretrained(UpperCAmelCase )
__snake_case : Optional[Any] = RagTokenizer.from_pretrained(UpperCAmelCase , config=UpperCAmelCase )
self.assertIsInstance(new_rag_tokenizer.question_encoder , UpperCAmelCase )
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
self.assertIsInstance(new_rag_tokenizer.generator , UpperCAmelCase )
self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )
@slow
def UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
__snake_case : List[Any] = RagTokenizer.from_pretrained("facebook/rag-token-nq" )
__snake_case : Dict = [
"who got the first nobel prize in physics",
"when is the next deadpool movie being released",
"which mode is used for short wave broadcast service",
"who is the owner of reading football club",
"when is the next scandal episode coming out",
"when is the last time the philadelphia won the superbowl",
"what is the most current adobe flash player version",
"how many episodes are there in dragon ball z",
"what is the first step in the evolution of the eye",
"where is gall bladder situated in human body",
"what is the main mineral in lithium batteries",
"who is the president of usa right now",
"where do the greasers live in the outsiders",
"panda is a national animal of which country",
"what is the name of manchester united stadium",
]
__snake_case : Optional[Any] = tokenizer(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
@slow
def UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
__snake_case : Union[str, Any] = RagTokenizer.from_pretrained("facebook/rag-sequence-nq" )
__snake_case : List[str] = [
"who got the first nobel prize in physics",
"when is the next deadpool movie being released",
"which mode is used for short wave broadcast service",
"who is the owner of reading football club",
"when is the next scandal episode coming out",
"when is the last time the philadelphia won the superbowl",
"what is the most current adobe flash player version",
"how many episodes are there in dragon ball z",
"what is the first step in the evolution of the eye",
"where is gall bladder situated in human body",
"what is the main mineral in lithium batteries",
"who is the president of usa right now",
"where do the greasers live in the outsiders",
"panda is a national animal of which country",
"what is the name of manchester united stadium",
]
__snake_case : List[Any] = tokenizer(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
| 326 |
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)


class HyperParamSearchBackendBase:
    """Base class for hyperparameter search backends."""

    name: str
    pip_package: str = None

    @staticmethod
    def is_available() -> bool:
        raise NotImplementedError

    def run(self, trainer, n_trials, direction, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self) -> None:
        if not self.is_available():
            raise RuntimeError(
                f"""You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}."""
            )

    @classmethod
    def pip_install(cls) -> str:
        return f"""`pip install {cls.pip_package or cls.name}`"""


class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available() -> bool:
        return is_optuna_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available() -> bool:
        return is_ray_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available() -> bool:
        return is_sigopt_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available() -> bool:
        return is_wandb_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)


ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}


def default_hp_search_backend() -> str:
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"""{len(available_backends)} hyperparameter search backends available. Using {name} as the default."""
            )
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f""" - To install {backend.name} run {backend.pip_install()}"""
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
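# A minimal end-to-end sketch of how these backends are reached in practice.
# Everything below is illustrative: it assumes a `trainer` built elsewhere
# with a `model_init` callback, and an Optuna-style search space.
def optuna_hp_space(trial):
    return {
        "learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True),
        "per_device_train_batch_size": trial.suggest_categorical(
            "per_device_train_batch_size", [16, 32, 64]
        ),
    }


best_run = trainer.hyperparameter_search(
    direction="minimize",      # minimize the objective (eval loss by default)
    backend="optuna",          # resolved through ALL_HYPERPARAMETER_SEARCH_BACKENDS
    hp_space=optuna_hp_space,  # backend-specific search-space callable
    n_trials=10,
)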
| 326 | 1 |
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """
    returns the list containing all the possible
    combinations a string (`target`) can be constructed from
    the given list of substrings (`word_bank`)
    """
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1

    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])

    # seed value
    table[0] = [[]]  # because empty string has empty combination

    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now,push that combination to the table[i+len(word)]
                    table[i + len(word)] += new_combinations

    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()

    return table[len(target)]
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
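# A quick sanity check: every returned combination concatenates back to the
# target, and the count follows directly from the definition.
combos = all_construct("purple", ["purp", "p", "ur", "le", "purpl"])
assert sorted("".join(way) for way in combos) == ["purple", "purple"]
assert len(combos) == 2  # ["purp", "le"] and ["p", "ur", "p", "le"]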
| 326 |
import math
def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node

    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index

    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index

    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)
    return array


def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot: int) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array: list) -> list:
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input('''Enter numbers separated by a comma : ''').strip()
    unsorted = [float(item) for item in user_input.split(''',''')]
    print(sort(unsorted))
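# Non-interactive sanity checks for `sort`, which mixes quicksort-style
# partitioning with heap sort and insertion sort depending on input size
# and recursion depth.
data = [4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12]
assert sort(data.copy()) == sorted(data)
assert sort([]) == []
assert sort([-1, -5, -3, 0]) == [-5, -3, -1, 0]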
| 326 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] =ViTImageProcessor if is_vision_available() else None
@property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a single random PIL image input."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input
def UpperCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case : List[str] = self.get_tokenizer()
__snake_case : Any = self.get_image_processor()
__snake_case : List[Any] = MgpstrProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
__snake_case : Any = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCAmelCase )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase )
def UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : int = self.get_tokenizer()
__snake_case : Optional[Any] = self.get_image_processor()
__snake_case : Union[str, Any] = MgpstrProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
__snake_case : int = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
__snake_case : Tuple = self.get_image_processor(do_normalize=UpperCAmelCase , padding_value=1.0 )
__snake_case : List[Any] = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase )
def UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
__snake_case : Optional[int] = self.get_image_processor()
__snake_case : Optional[int] = self.get_tokenizer()
__snake_case : Any = MgpstrProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
__snake_case : Union[str, Any] = self.prepare_image_inputs()
__snake_case : Union[str, Any] = image_processor(UpperCAmelCase , return_tensors="np" )
__snake_case : Union[str, Any] = processor(images=UpperCAmelCase , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case : List[str] = self.get_image_processor()
__snake_case : List[Any] = self.get_tokenizer()
__snake_case : Dict = MgpstrProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
__snake_case : int = "test"
__snake_case : Tuple = processor(text=UpperCAmelCase )
__snake_case : List[Any] = tokenizer(UpperCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
__snake_case : List[Any] = self.get_image_processor()
__snake_case : Optional[Any] = self.get_tokenizer()
__snake_case : List[str] = MgpstrProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
__snake_case : Any = "test"
__snake_case : Tuple = self.prepare_image_inputs()
__snake_case : Union[str, Any] = processor(text=UpperCAmelCase , images=UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "labels"] )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase ):
processor()
def UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case : Tuple = self.get_image_processor()
__snake_case : Dict = self.get_tokenizer()
__snake_case : Any = MgpstrProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
__snake_case : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
__snake_case : Optional[Any] = processor.char_decode(UpperCAmelCase )
__snake_case : Dict = tokenizer.batch_decode(UpperCAmelCase )
__snake_case : List[str] = [seq.replace(" " , "" ) for seq in decoded_tok]
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
__snake_case : Dict = self.get_image_processor()
__snake_case : Optional[Any] = self.get_tokenizer()
__snake_case : Dict = MgpstrProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
__snake_case : Union[str, Any] = None
__snake_case : int = self.prepare_image_inputs()
__snake_case : Optional[Any] = processor(text=UpperCAmelCase , images=UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case : str = self.get_image_processor()
__snake_case : int = self.get_tokenizer()
__snake_case : int = MgpstrProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
__snake_case : Dict = torch.randn(1 , 27 , 38 )
__snake_case : List[Any] = torch.randn(1 , 27 , 50257 )
__snake_case : List[str] = torch.randn(1 , 27 , 30522 )
__snake_case : Dict = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"] )
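# A minimal inference sketch for the processor under test. It assumes network
# access to the public `alibaba-damo/mgp-str-base` checkpoint and uses a
# placeholder image path.
from PIL import Image
from transformers import MgpstrForSceneTextRecognition, MgpstrProcessor

processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")

image = Image.open("text_crop.png").convert("RGB")  # placeholder path
pixel_values = processor(images=image, return_tensors="pt").pixel_values
outputs = model(pixel_values)
text = processor.batch_decode(outputs.logits)["generated_text"]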
| 326 |
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one(i):  # picklable for multiprocessing
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input():
    with parallel_backend("spark"):
        assert ParallelBackendConfig.backend_name == "spark"

    lst = [1, 2, 3]

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=2)

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=-1)


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc", [2, -1])
def test_parallel_backend_map_nested(num_proc):
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}

    with parallel_backend("spark"):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
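# Beyond the tests, the backend is used the same way in application code.
# A sketch assuming `joblibspark` is installed and a Spark session is running:
from datasets.parallel import parallel_backend
from datasets.utils.py_utils import map_nested


def double(x):  # module-level so it stays picklable
    return 2 * x


with parallel_backend("spark"):
    out = map_nested(double, {"a": [1, 2], "b": [3, 4]}, num_proc=2)
# out == {"a": [2, 4], "b": [6, 8]}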
| 326 | 1 |
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    """
    Hash map with open addressing.
    """

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        """Get the next index: linear open addressing."""
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        """
        Try to add the value to the bucket. If the bucket is empty or the key
        matches, insert and return True; otherwise the caller must probe the
        next bucket.
        """
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        """Return True if we have reached safe capacity."""
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        """Return True if the table is sparse enough to shrink."""
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = " ,".join(
            f"""{item.key}: {item.val}""" for item in self._buckets if item )
        return f"""HashMap({val_string})"""
| 326 |
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    """
    Implementation of the simulated annealing algorithm: start from a given
    state, pick random neighbors, and move to worse states with a probability
    that decays as the temperature drops.
    """
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score

            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds

            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state


if __name__ == "__main__":

    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        '''The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
        f'''and 50 > y > - 5 found via hill climbing: {local_min.score()}'''
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        '''The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
        f'''and 50 > y > - 5 found via hill climbing: {local_min.score()}'''
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        '''The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '''
        f'''{local_min.score()}'''
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        '''The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '''
        f'''{local_min.score()}'''
    )
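# The key line above is the Metropolis-style acceptance rule
# probability = e ** (change / current_temp): a worsening move is accepted
# with a probability that shrinks as the temperature decays. A small numeric
# sketch (values rounded):
import math

change = -2.0                     # a move that worsens the score by 2
print(math.e ** (change / 100))   # ~0.980 at T=100: almost always accepted
print(math.e ** (change / 1.5))   # ~0.264 at T=1.5: usually rejected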
| 326 | 1 |
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class FunnelTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "<cls>",
            "<sep>",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_token_type_ids(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            inputs = tokenizer("UNwant\u00E9d,running")
            sentence_len = len(inputs["input_ids"]) - 1
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)

            inputs = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running")
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len)
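# The `[2] + [0] * sentence_len` expectation reflects Funnel's convention of
# giving the <cls> token its own token type (2). A minimal sketch against a
# public checkpoint (network access assumed):
from transformers import FunnelTokenizer

tokenizer = FunnelTokenizer.from_pretrained("funnel-transformer/small")
encoded = tokenizer("hello there")
assert encoded["token_type_ids"][0] == 2  # <cls> carries token type 2
assert set(encoded["token_type_ids"][1:]) == {0}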
| 326 |
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor(ProcessorMixin):
    r"""Constructs a FLAVA processor which wraps an image processor and a tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: Optional[ImageInput] = None,
        text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = False,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_image_mask: Optional[bool] = None,
        return_codebook_pixels: Optional[bool] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        if images is not None:
            image_features = self.image_processor(
                images,
                return_image_mask=return_image_mask,
                return_codebook_pixels=return_codebook_pixels,
                return_tensors=return_tensors,
                **kwargs,
            )

        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
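# A short usage sketch of the processor defined above, assuming network access
# to the public `facebook/flava-full` checkpoint.
from PIL import Image
from transformers import FlavaProcessor

processor = FlavaProcessor.from_pretrained("facebook/flava-full")
image = Image.new("RGB", (224, 224))  # stand-in for a real image

inputs = processor(text=["a photo of a cat"], images=[image], return_tensors="pt", padding=True)
# the joint batch carries tokenizer fields plus `pixel_values` from the image processor
print(sorted(inputs.keys()))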
| 326 | 1 |
from __future__ import annotations
Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]

# a grid with no solution
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]


def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """
    Check whether digit ``n`` can be placed at ``grid[row][column]`` without
    clashing with its row, column, or 3x3 sub-grid.
    """
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Find the first vacant cell, searching row by row."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """Solve the grid in place by backtracking, or return ``None``."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit

            if sudoku(grid) is not None:
                return grid

            grid[row][column] = 0

    return None


def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()


if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    for example_grid in (initial_grid, no_solution):
        print('''\nExample grid:\n''' + '''=''' * 20)
        print_solution(example_grid)
        print('''\nExample grid solution:''')
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print('''Cannot find a solution.''')
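# Two quick checks of the safety predicate, hand-verified against the grid
# values above. They use `no_solution`, which a failed solve leaves unchanged
# because backtracking resets every cell it tried.
assert is_safe(no_solution, row=0, column=1, n=9)      # 9 is still free here
assert not is_safe(no_solution, row=0, column=1, n=7)  # 7 already sits in the top-left box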
| 326 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model'''}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''',
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''camembert-base''': 512,
}

SPIECE_UNDERLINE = '''▁'''
class CamembertTokenizer(PreTrainedTokenizer):
    """
    Construct a CamemBERT tokenizer, based on SentencePiece.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>)
        self.fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self) -> int:
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
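# A quick usage sketch of the tokenizer above, assuming network access to the
# public `camembert-base` checkpoint.
from transformers import CamembertTokenizer

tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
enc = tokenizer("J'aime le camembert !")
print(tokenizer.convert_ids_to_tokens(enc["input_ids"]))  # wrapped in <s> ... </s>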
| 326 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
'''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
'''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
'''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
'''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
'''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
'''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
'''self_attn.rotary_emb''': '''encoder.embed_positions''',
'''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
'''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
'''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
'''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
'''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
'''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
'''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
'''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
'''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
'''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
'''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
'''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
            f""" {value.shape} for {full_name}""" )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def lowerCAmelCase__( lowercase : Dict , lowercase : Optional[int] , lowercase : Union[str, Any] ) -> int:
__snake_case : Dict = []
__snake_case : Dict = fairseq_model.state_dict()
__snake_case : Any = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
__snake_case : Tuple = False
if "conv_layers" in name:
load_conv_layer(
lowercase , lowercase , lowercase , lowercase , hf_model.config.feat_extract_norm == "group" , )
__snake_case : str = True
else:
for key, mapped_key in MAPPING.items():
__snake_case : List[str] = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
__snake_case : Optional[int] = True
if "*" in mapped_key:
__snake_case : int = name.split(lowercase )[0].split("." )[-2]
__snake_case : Union[str, Any] = mapped_key.replace("*" , lowercase )
if "pos_bias_u" in name:
__snake_case : Any = None
elif "pos_bias_v" in name:
__snake_case : int = None
elif "weight_g" in name:
__snake_case : List[str] = "weight_g"
elif "weight_v" in name:
__snake_case : Union[str, Any] = "weight_v"
elif "bias" in name:
__snake_case : str = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__snake_case : str = "weight"
elif "running_mean" in name:
__snake_case : List[Any] = "running_mean"
elif "inv_freq" in name:
__snake_case : Union[str, Any] = "inv_freq"
elif "running_var" in name:
__snake_case : Union[str, Any] = "running_var"
elif "num_batches_tracked" in name:
__snake_case : Union[str, Any] = "num_batches_tracked"
else:
__snake_case : Optional[int] = None
set_recursively(lowercase , lowercase , lowercase , lowercase , lowercase )
continue
if not is_used:
unused_weights.append(lowercase )
logger.warning(f"""Unused weights: {unused_weights}""" )
def lowerCAmelCase__( lowercase : Tuple , lowercase : str , lowercase : Union[str, Any] , lowercase : Tuple , lowercase : Dict ) -> List[str]:
__snake_case : str = full_name.split("conv_layers." )[-1]
__snake_case : Any = name.split("." )
__snake_case : Dict = int(items[0] )
__snake_case : Union[str, Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
__snake_case : List[Any] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
__snake_case : List[Any] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
__snake_case : Tuple = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
__snake_case : Optional[int] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(lowercase )
@torch.no_grad()
def lowerCAmelCase__( lowercase : Union[str, Any] , lowercase : List[str] , lowercase : Tuple=None , lowercase : List[Any]=None , lowercase : int=True ) -> List[str]:
if config_path is not None:
__snake_case : Union[str, Any] = WavaVecaConformerConfig.from_pretrained(lowercase , hidden_act="swish" )
else:
__snake_case : List[Any] = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
__snake_case : int = "rotary"
if is_finetuned:
if dict_path:
__snake_case : Tuple = Dictionary.load(lowercase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__snake_case : Union[str, Any] = target_dict.pad_index
__snake_case : Optional[Any] = target_dict.bos_index
__snake_case : Optional[Any] = target_dict.eos_index
__snake_case : Optional[Any] = len(target_dict.symbols )
__snake_case : str = os.path.join(lowercase , "vocab.json" )
if not os.path.isdir(lowercase ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(lowercase ) )
return
os.makedirs(lowercase , exist_ok=lowercase )
__snake_case : List[str] = target_dict.indices
# fairseq has the <pad> and <s> switched
__snake_case : Any = 0
__snake_case : Any = 1
with open(lowercase , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(lowercase , lowercase )
__snake_case : Dict = WavaVecaCTCTokenizer(
lowercase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=lowercase , )
__snake_case : Tuple = True if config.feat_extract_norm == "layer" else False
__snake_case : Tuple = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=lowercase , return_attention_mask=lowercase , )
__snake_case : int = WavaVecaProcessor(feature_extractor=lowercase , tokenizer=lowercase )
processor.save_pretrained(lowercase )
__snake_case : str = WavaVecaConformerForCTC(lowercase )
else:
__snake_case : Optional[Any] = WavaVecaConformerForPreTraining(lowercase )
if is_finetuned:
__snake_case , __snake_case , __snake_case : List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
__snake_case : Union[str, Any] = argparse.Namespace(task="audio_pretraining" )
__snake_case : Union[str, Any] = fairseq.tasks.setup_task(lowercase )
__snake_case , __snake_case , __snake_case : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowercase )
__snake_case : Optional[Any] = model[0].eval()
recursively_load_weights(lowercase , lowercase , not is_finetuned )
hf_wavavec.save_pretrained(lowercase )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
_UpperCamelCase = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
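# A hypothetical invocation of the conversion script above; the script file
# name and every path below are placeholders, not real artifacts.
import subprocess

subprocess.run(
    [
        "python", "convert_wav2vec2_conformer_checkpoint.py",
        "--checkpoint_path", "/path/to/wav2vec2_conformer.pt",
        "--pytorch_dump_folder_path", "./wav2vec2-conformer-hf",
        "--not_finetuned",
    ],
    check=True,
)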
| 326 |
def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    """
    >>> is_sum_subset([2, 4, 6, 8], 5)
    False
    >>> is_sum_subset([2, 4, 6, 8], 14)
    True
    """
    # a subset[i][j] value of True says a subset of the first i elements
    # can form the sum j
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
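# Classic worked example: {3, 34, 4, 12, 5, 2} contains 4 + 5 = 9,
# but no subset sums to 30.
assert is_sum_subset([3, 34, 4, 12, 5, 2], 9) is True
assert is_sum_subset([3, 34, 4, 12, 5, 2], 30) is False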
| 326 | 1 |
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def lowerCAmelCase__( lowercase : Optional[int] , lowercase : Any , lowercase : Dict , lowercase : List[str] , lowercase : List[Any] ) -> Tuple:
# Load configuration defined in the metadata file
with open(lowercase ) as metadata_file:
__snake_case : int = json.load(lowercase )
__snake_case : Optional[int] = LukeConfig(use_entity_aware_attention=lowercase , **metadata["model_config"] )
# Load in the weights from the checkpoint_path
__snake_case : List[Any] = torch.load(lowercase , map_location="cpu" )["module"]
# Load the entity vocab file
__snake_case : Tuple = load_original_entity_vocab(lowercase )
# add an entry for [MASK2]
__snake_case : Optional[int] = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
__snake_case : Union[str, Any] = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
__snake_case : Optional[int] = AddedToken("<ent>" , lstrip=lowercase , rstrip=lowercase )
__snake_case : Any = AddedToken("<ent2>" , lstrip=lowercase , rstrip=lowercase )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(lowercase )
with open(os.path.join(lowercase , "tokenizer_config.json" ) , "r" ) as f:
__snake_case : Tuple = json.load(lowercase )
__snake_case : List[Any] = "MLukeTokenizer"
with open(os.path.join(lowercase , "tokenizer_config.json" ) , "w" ) as f:
json.dump(lowercase , lowercase )
with open(os.path.join(lowercase , MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f:
json.dump(lowercase , lowercase )
__snake_case : Any = MLukeTokenizer.from_pretrained(lowercase )
# Initialize the embeddings of the special tokens
__snake_case : str = tokenizer.convert_tokens_to_ids(["@"] )[0]
__snake_case : List[str] = tokenizer.convert_tokens_to_ids(["#"] )[0]
__snake_case : List[Any] = state_dict["embeddings.word_embeddings.weight"]
__snake_case : Union[str, Any] = word_emb[ent_init_index].unsqueeze(0 )
__snake_case : Union[str, Any] = word_emb[enta_init_index].unsqueeze(0 )
__snake_case : Union[str, Any] = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
__snake_case : List[Any] = state_dict[bias_name]
__snake_case : Optional[int] = decoder_bias[ent_init_index].unsqueeze(0 )
__snake_case : int = decoder_bias[enta_init_index].unsqueeze(0 )
__snake_case : Any = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
__snake_case : Dict = f"""encoder.layer.{layer_index}.attention.self."""
__snake_case : Union[str, Any] = state_dict[prefix + matrix_name]
__snake_case : str = state_dict[prefix + matrix_name]
__snake_case : Union[str, Any] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
__snake_case : Any = state_dict["entity_embeddings.entity_embeddings.weight"]
__snake_case : List[str] = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
__snake_case : Any = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
__snake_case : List[Any] = state_dict["entity_predictions.bias"]
__snake_case : List[Any] = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
__snake_case : Union[str, Any] = torch.cat([entity_prediction_bias, entity_mask_bias] )
__snake_case : Any = LukeForMaskedLM(config=lowercase ).eval()
state_dict.pop("entity_predictions.decoder.weight" )
state_dict.pop("lm_head.decoder.weight" )
state_dict.pop("lm_head.decoder.bias" )
__snake_case : int = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )):
__snake_case : str = state_dict[key]
else:
__snake_case : str = state_dict[key]
__snake_case , __snake_case : Union[str, Any] = model.load_state_dict(lowercase , strict=lowercase )
if set(lowercase ) != {"luke.embeddings.position_ids"}:
raise ValueError(f"""Unexpected unexpected_keys: {unexpected_keys}""" )
if set(lowercase ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(f"""Unexpected missing_keys: {missing_keys}""" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
__snake_case : int = MLukeTokenizer.from_pretrained(lowercase , task="entity_classification" )
__snake_case : Tuple = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
__snake_case : Union[str, Any] = (0, 9)
__snake_case : Optional[int] = tokenizer(lowercase , entity_spans=[span] , return_tensors="pt" )
__snake_case : Any = model(**lowercase )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
__snake_case : Optional[Any] = torch.Size((1, 33, 768) )
__snake_case : Optional[int] = torch.tensor([[0.0_8_9_2, 0.0_5_9_6, -0.2_8_1_9], [0.0_1_3_4, 0.1_1_9_9, 0.0_5_7_3], [-0.0_1_6_9, 0.0_9_2_7, 0.0_6_4_4]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowercase , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
__snake_case : str = torch.Size((1, 1, 768) )
__snake_case : int = torch.tensor([[-0.1_4_8_2, 0.0_6_0_9, 0.0_3_2_2]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
f""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , lowercase , atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
__snake_case : str = MLukeTokenizer.from_pretrained(lowercase )
__snake_case : Dict = "Tokyo is the capital of <mask>."
__snake_case : Union[str, Any] = (24, 30)
__snake_case : int = tokenizer(lowercase , entity_spans=[span] , return_tensors="pt" )
__snake_case : int = model(**lowercase )
__snake_case : Dict = encoding["input_ids"][0].tolist()
__snake_case : Dict = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) )
__snake_case : Optional[int] = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(lowercase )
__snake_case : Optional[Any] = outputs.entity_logits[0][0].argmax().item()
__snake_case : Optional[int] = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(lowercase ) )
model.save_pretrained(lowercase )
def lowerCAmelCase__( lowercase : Optional[int] ) -> List[Any]:
__snake_case : Any = ["[MASK]", "[PAD]", "[UNK]"]
__snake_case : Any = [json.loads(lowercase ) for line in open(lowercase )]
__snake_case : Any = {}
for entry in data:
__snake_case : Any = entry["id"]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
__snake_case : Optional[int] = entity_id
break
__snake_case : Union[str, Any] = f"""{language}:{entity_name}"""
__snake_case : Any = entity_id
return new_mapping
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
_UpperCamelCase = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
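# A minimal, self-contained sketch of the embedding-extension step above: the row
# for a new special token is initialized from an existing row and concatenated on.
# All shapes and indices here are illustrative assumptions, not the real model's.
import torch
word_emb_demo = torch.randn(10, 4) # pretend vocabulary of 10 tokens, hidden size 4
new_row = word_emb_demo[3].unsqueeze(0) # reuse the embedding of an existing token
extended = torch.cat([word_emb_demo, new_row])
assert extended.shape == (11, 4)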
| 326 |
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
_UpperCamelCase = 4
_UpperCamelCase = 3
class _lowerCamelCase ( a ):
"""simple docstring"""
pass
def lowerCAmelCase__( lowercase : List[str] ) -> Any:
for shard in shards:
for i in range(lowercase ):
yield {"i": i, "shard": shard}
def lowerCAmelCase__( ) -> Optional[int]:
__snake_case : List[Any] = int(os.environ["RANK"] )
__snake_case : Optional[int] = int(os.environ["WORLD_SIZE"] )
__snake_case : List[str] = ArgumentParser()
parser.add_argument("--streaming" , type=lowercase )
parser.add_argument("--local_rank" , type=lowercase )
parser.add_argument("--num_workers" , type=lowercase , default=0 )
__snake_case : Any = parser.parse_args()
__snake_case : Dict = args.streaming
__snake_case : Union[str, Any] = args.num_workers
__snake_case : Any = {"shards": [f"""shard_{shard_idx}""" for shard_idx in range(lowercase )]}
__snake_case : Optional[int] = IterableDataset.from_generator(lowercase , gen_kwargs=lowercase )
if not streaming:
__snake_case : Any = Dataset.from_list(list(lowercase ) )
__snake_case : Dict = split_dataset_by_node(lowercase , rank=lowercase , world_size=lowercase )
__snake_case : Union[str, Any] = torch.utils.data.DataLoader(lowercase , num_workers=lowercase )
__snake_case : Optional[int] = NUM_SHARDS * NUM_ITEMS_PER_SHARD
__snake_case : List[str] = full_size // world_size
expected_local_size += int(rank < (full_size % world_size) )
__snake_case : Dict = sum(1 for _ in dataloader )
if local_size != expected_local_size:
raise FailedTestError(f"""local_size {local_size} != expected_local_size {expected_local_size}""" )
if __name__ == "__main__":
main()
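# Hedged single-process illustration of split_dataset_by_node (toy values assumed):
# each rank receives a disjoint slice whose sizes sum to the full dataset.
from datasets import Dataset
from datasets.distributed import split_dataset_by_node
ds = Dataset.from_dict({"i": list(range(8))})
shard_a = split_dataset_by_node(ds, rank=0, world_size=2)
shard_b = split_dataset_by_node(ds, rank=1, world_size=2)
assert len(shard_a) + len(shard_b) == len(ds)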
| 326 | 1 |
from datetime import datetime
import requests
def lowerCAmelCase__( lowercase : str ) -> bytes:
__snake_case : Union[str, Any] = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
__snake_case : str = requests.get(base_url + url ).json()[0]["urls"][0]["src"]
return requests.get(lowercase ).content
if __name__ == "__main__":
_UpperCamelCase = input('''Enter Video/IGTV url: ''').strip()
_UpperCamelCase = F'''{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'''
with open(file_name, '''wb''') as fp:
fp.write(download_video(url))
print(F'''Done. Video saved to disk as {file_name}.''')
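# Hedged variant for large files: streaming the response avoids holding the whole
# video in memory at once (the URL below is a placeholder assumption).
import requests
with requests.get("https://example.com/video.mp4", stream=True) as response:
    response.raise_for_status()
    with open("video.mp4", "wb") as fp:
        for chunk in response.iter_content(chunk_size=8192):
            fp.write(chunk)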
| 326 |
def lowerCAmelCase__( lowercase : int = 100_0000 ) -> int:
__snake_case : List[Any] = limit + 1
__snake_case : List[str] = [0] * limit
for first_term in range(1 , lowercase ):
for n in range(lowercase , lowercase , lowercase ):
__snake_case : Union[str, Any] = first_term + n / first_term
if common_difference % 4: # d must be divisible by 4
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x,y,z are positive integers
frequency[n] += 1 # so z > 0 needs a > d, and n > 0 needs a < 4d
__snake_case : Tuple = sum(1 for x in frequency[1:limit] if x == 10 )
return count
if __name__ == "__main__":
print(F'''{solution() = }''')
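# Hedged brute-force cross-check for small limits, using the same parametrization
# the solution relies on: with the progression z = a - d, a, x = a + d one gets
# n = x**2 - a**2 - z**2 = a * (4 * d - a).
from collections import Counter
def brute_force_count(limit: int) -> int:
    frequency: Counter = Counter()
    for a in range(1, limit):
        for d in range(a // 4 + 1, a): # keeps n > 0 and z = a - d > 0
            n = a * (4 * d - a)
            if n < limit:
                frequency[n] += 1
    return sum(1 for occurrences in frequency.values() if occurrences == 10)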
| 326 | 1 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case : Dict = tempfile.mkdtemp()
__snake_case : Union[str, Any] = BlipImageProcessor()
__snake_case : List[str] = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model" )
__snake_case : Union[str, Any] = BlipaProcessor(UpperCAmelCase , UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
def UpperCAmelCase ( self , **UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase ).tokenizer
def UpperCAmelCase ( self , **UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase ).image_processor
def UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
__snake_case : int = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__snake_case : Any = [Image.fromarray(np.moveaxis(UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : Optional[Any] = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__snake_case : Optional[Any] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
__snake_case : str = self.get_image_processor(do_normalize=UpperCAmelCase , padding_value=1.0 )
__snake_case : Any = BlipaProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase )
def UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
__snake_case : Tuple = self.get_image_processor()
__snake_case : Optional[Any] = self.get_tokenizer()
__snake_case : Union[str, Any] = BlipaProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
__snake_case : str = self.prepare_image_inputs()
__snake_case : Optional[int] = image_processor(UpperCAmelCase , return_tensors="np" )
__snake_case : Union[str, Any] = processor(images=UpperCAmelCase , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : Dict = self.get_image_processor()
__snake_case : Dict = self.get_tokenizer()
__snake_case : str = BlipaProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
__snake_case : int = "lower newer"
__snake_case : Optional[Any] = processor(text=UpperCAmelCase )
__snake_case : Any = tokenizer(UpperCAmelCase , return_token_type_ids=UpperCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
__snake_case : Union[str, Any] = self.get_image_processor()
__snake_case : List[str] = self.get_tokenizer()
__snake_case : Union[str, Any] = BlipaProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
__snake_case : Optional[int] = "lower newer"
__snake_case : Optional[Any] = self.prepare_image_inputs()
__snake_case : Dict = processor(text=UpperCAmelCase , images=UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase ):
processor()
def UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
__snake_case : Tuple = self.get_image_processor()
__snake_case : List[Any] = self.get_tokenizer()
__snake_case : List[Any] = BlipaProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
__snake_case : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__snake_case : int = processor.batch_decode(UpperCAmelCase )
__snake_case : Optional[Any] = tokenizer.batch_decode(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case : int = self.get_image_processor()
__snake_case : List[str] = self.get_tokenizer()
__snake_case : Any = BlipaProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
__snake_case : Union[str, Any] = "lower newer"
__snake_case : str = self.prepare_image_inputs()
__snake_case : Tuple = processor(text=UpperCAmelCase , images=UpperCAmelCase )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
| 326 |
from __future__ import annotations
def lowerCAmelCase__( lowercase : str , lowercase : list[str] | None = None ) -> list[list[str]]:
__snake_case : List[str] = word_bank or []
# create a table
__snake_case : int = len(lowercase ) + 1
__snake_case : list[list[list[str]]] = []
for _ in range(lowercase ):
table.append([] )
# seed value
__snake_case : Optional[int] = [[]] # because the empty string has exactly one combination: the empty one
# iterate through the indices
for i in range(lowercase ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(lowercase )] == word:
__snake_case : list[list[str]] = [
[word, *way] for way in table[i]
]
# adds the word to every combination the current position holds
# now, push that combination to table[i + len(word)]
table[i + len(lowercase )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(lowercase )]:
combination.reverse()
return table[len(lowercase )]
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
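# Hedged companion sketch: the decision version ("can the target be built at all?")
# with memoization, which is much cheaper than enumerating every combination.
from functools import lru_cache
def can_construct(target: str, word_bank: tuple) -> bool:
    @lru_cache(maxsize=None)
    def helper(remaining: str) -> bool:
        if remaining == "":
            return True
        return any(
            helper(remaining[len(word):]) for word in word_bank if remaining.startswith(word)
        )
    return helper(target)
print(can_construct("jwajalapa", ("jwa", "j", "w", "a", "la", "lapa"))) # True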
| 326 | 1 |
def lowerCAmelCase__( lowercase : int = 50 ) -> int:
__snake_case : Union[str, Any] = [1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
if __name__ == "__main__":
print(F'''{solution() = }''')
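# Hedged sanity check on a tiny case (calling the function under its name above):
# a row of length 2 is either left empty or filled by one length-2 tile.
assert lowerCAmelCase__(2) == 2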
| 326 |
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase=2 , UpperCAmelCase=56 , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=99 , UpperCAmelCase=32 , UpperCAmelCase=2 , UpperCAmelCase=2 , UpperCAmelCase=7 , UpperCAmelCase="gelu_new" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=512 , UpperCAmelCase=16 , UpperCAmelCase=2 , UpperCAmelCase=0.02 , UpperCAmelCase=4 , UpperCAmelCase="block_sparse" , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase=2 , UpperCAmelCase=3 , ) -> Tuple:
'''simple docstring'''
__snake_case : Optional[int] = parent
__snake_case : Tuple = batch_size
__snake_case : List[str] = seq_length
__snake_case : Optional[int] = is_training
__snake_case : int = use_attention_mask
__snake_case : Union[str, Any] = use_token_type_ids
__snake_case : Any = use_labels
__snake_case : List[str] = vocab_size
__snake_case : int = hidden_size
__snake_case : List[str] = num_hidden_layers
__snake_case : List[Any] = num_attention_heads
__snake_case : Optional[int] = intermediate_size
__snake_case : Union[str, Any] = hidden_act
__snake_case : Optional[int] = hidden_dropout_prob
__snake_case : Optional[Any] = attention_probs_dropout_prob
__snake_case : str = max_position_embeddings
__snake_case : List[Any] = type_vocab_size
__snake_case : int = type_sequence_label_size
__snake_case : Dict = initializer_range
__snake_case : List[Any] = num_choices
__snake_case : Union[str, Any] = rescale_embeddings
__snake_case : List[Any] = attention_type
__snake_case : str = use_bias
__snake_case : Dict = block_size
__snake_case : Optional[Any] = num_random_blocks
def UpperCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case : Any = None
if self.use_attention_mask:
__snake_case : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case : Union[str, Any] = None
if self.use_token_type_ids:
__snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__snake_case : Optional[int] = BigBirdConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
return config, input_ids, token_type_ids, attention_mask
def UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
__snake_case : Optional[int] = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case , __snake_case : Dict = config_and_inputs
__snake_case : int = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_flax
class _lowerCamelCase ( a , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] =(
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
UpperCAmelCase_ : Dict =False
UpperCAmelCase_ : str =False
def UpperCAmelCase ( self ) -> str:
'''simple docstring'''
__snake_case : Dict = FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
super().test_hidden_states_output()
@slow
def UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
for model_class_name in self.all_model_classes:
__snake_case : Any = model_class_name.from_pretrained("google/bigbird-roberta-base" )
self.assertIsNotNone(UpperCAmelCase )
def UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def UpperCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case , __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__snake_case : Optional[Any] = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
__snake_case : Tuple = model_class(UpperCAmelCase )
@jax.jit
def model_jitted(UpperCAmelCase , UpperCAmelCase=None , **UpperCAmelCase ):
return model(input_ids=UpperCAmelCase , attention_mask=UpperCAmelCase , **UpperCAmelCase )
with self.subTest("JIT Enabled" ):
__snake_case : int = model_jitted(**UpperCAmelCase ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
__snake_case : List[Any] = model_jitted(**UpperCAmelCase ).to_tuple()
self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) )
for jitted_output, output in zip(UpperCAmelCase , UpperCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=1E-5 , UpperCAmelCase="outputs" , UpperCAmelCase=None ) -> int:
'''simple docstring'''
if name.startswith("outputs.attentions" ):
return
else:
super().check_pt_flax_outputs(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
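# Hedged standalone illustration of the jax.jit pattern exercised above: the
# function is traced once and subsequent calls run the compiled version.
import jax.numpy as jnp
@jax.jit
def scaled_sum(x):
    return jnp.sum(x) * 2.0
print(scaled_sum(jnp.ones((3,)))) # -> 6.0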
| 326 | 1 |
class _lowerCamelCase ( a ): # the custom OverFlowError used by the queues below
"""simple docstring"""
pass
class _lowerCamelCase ( a ): # the custom UnderFlowError used by the queues below
"""simple docstring"""
pass
class _lowerCamelCase :
"""simple docstring"""
def __init__( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : int = [
[],
[],
[],
]
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase ) -> None:
'''simple docstring'''
try:
if len(self.queues[priority] ) >= 100:
raise OverflowError("Maximum queue size is 100" )
self.queues[priority].append(UpperCAmelCase )
except IndexError:
raise ValueError("Valid priorities are 0, 1, and 2" )
def UpperCAmelCase ( self ) -> int:
'''simple docstring'''
for queue in self.queues:
if queue:
return queue.pop(0 )
raise UnderFlowError("All queues are empty" )
def __str__( self ) -> str:
'''simple docstring'''
return "\n".join(F"""Priority {i}: {q}""" for i, q in enumerate(self.queues ) )
class _lowerCamelCase :
"""simple docstring"""
def __init__( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : Union[str, Any] = []
def UpperCAmelCase ( self , UpperCAmelCase ) -> None:
'''simple docstring'''
if len(self.queue ) == 100:
raise OverFlowError("Maximum queue size is 100" )
self.queue.append(UpperCAmelCase )
def UpperCAmelCase ( self ) -> int:
'''simple docstring'''
if not self.queue:
raise UnderFlowError("The queue is empty" )
else:
__snake_case : Optional[int] = min(self.queue )
self.queue.remove(UpperCAmelCase )
return data
def __str__( self ) -> str:
'''simple docstring'''
return str(self.queue )
def lowerCAmelCase__( ) -> List[Any]:
__snake_case : int = FixedPriorityQueue()
fpq.enqueue(0 , 10 )
fpq.enqueue(1 , 70 )
fpq.enqueue(0 , 100 )
fpq.enqueue(2 , 1 )
fpq.enqueue(2 , 5 )
fpq.enqueue(1 , 7 )
fpq.enqueue(2 , 4 )
fpq.enqueue(1 , 64 )
fpq.enqueue(0 , 128 )
print(lowercase )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(lowercase )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def lowerCAmelCase__( ) -> int:
__snake_case : str = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(100 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(128 )
print(lowercase )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(lowercase )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
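# Hedged note: the standard library's heapq gives the same dequeue-smallest
# behavior with O(log n) operations instead of the O(n) min()/remove() above.
import heapq
heap: list = []
for item in (10, 70, 100, 1, 5, 7, 4, 64, 128):
    heapq.heappush(heap, item)
print(heapq.heappop(heap)) # -> 1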
| 326 |
import argparse
import datetime
def lowerCAmelCase__( lowercase : str ) -> str:
__snake_case : int = {
"0": "Sunday",
"1": "Monday",
"2": "Tuesday",
"3": "Wednesday",
"4": "Thursday",
"5": "Friday",
"6": "Saturday",
}
__snake_case : int = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
# Validate
if not 0 < len(lowercase ) < 11:
raise ValueError("Must be 10 characters long" )
# Get month
__snake_case : int = int(date_input[0] + date_input[1] )
# Validate
if not 0 < m < 13:
raise ValueError("Month must be between 1 - 12" )
__snake_case : str = date_input[2]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError("Date separator must be '-' or '/'" )
# Get day
__snake_case : int = int(date_input[3] + date_input[4] )
# Validate
if not 0 < d < 32:
raise ValueError("Date must be between 1 - 31" )
# Get second separator
__snake_case : str = date_input[5]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError("Date separator must be '-' or '/'" )
# Get year
__snake_case : int = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] )
# Arbitrary year range
if not 45 < y < 8500:
raise ValueError(
"Year out of range. There has to be some sort of limit...right?" )
# Get datetime obj for validation
__snake_case : str = datetime.date(int(lowercase ) , int(lowercase ) , int(lowercase ) )
# Start math
if m <= 2:
__snake_case : Optional[Any] = y - 1
__snake_case : Tuple = m + 12
# maths var
__snake_case : int = int(str(lowercase )[:2] )
__snake_case : int = int(str(lowercase )[2:] )
__snake_case : int = int(2.6 * m - 5.3_9 )
__snake_case : int = int(c / 4 )
__snake_case : int = int(k / 4 )
__snake_case : int = int(d + k )
__snake_case : int = int(t + u + v + x )
__snake_case : int = int(z - (2 * c) )
__snake_case : int = round(w % 7 )
# End math
# Validate math
if f != convert_datetime_days[dt_ck.weekday()]:
raise AssertionError("The date was evaluated incorrectly. Contact developer." )
# Response
__snake_case : str = f"""Your date {date_input}, is a {days[str(lowercase )]}!"""
return response
if __name__ == "__main__":
import doctest
doctest.testmod()
_UpperCamelCase = argparse.ArgumentParser(
description=(
'''Find out what day of the week nearly any date is or was. Enter '''
'''date as a string in the mm-dd-yyyy or mm/dd/yyyy format'''
)
)
parser.add_argument(
'''date_input''', type=str, help='''Date as a string (mm-dd-yyyy or mm/dd/yyyy)'''
)
_UpperCamelCase = parser.parse_args()
zeller(args.date_input)
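# Hedged cross-check of the idea behind the internal validation above: datetime
# can confirm the weekday for any single date directly.
assert datetime.date(2010, 1, 31).strftime("%A") == "Sunday"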
| 326 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_UpperCamelCase = {
'''configuration_altclip''': [
'''ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AltCLIPConfig''',
'''AltCLIPTextConfig''',
'''AltCLIPVisionConfig''',
],
'''processing_altclip''': ['''AltCLIPProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
'''ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AltCLIPPreTrainedModel''',
'''AltCLIPModel''',
'''AltCLIPTextModel''',
'''AltCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
_UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
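# Hedged sketch of the lazy-import idea behind _LazyModule: defer the real import
# until an attribute is first touched (module paths below are placeholders).
import importlib
import types
class LazyModuleSketch(types.ModuleType):
    def __init__(self, name: str, attr_to_module: dict):
        super().__init__(name)
        self._attr_to_module = attr_to_module
    def __getattr__(self, attr: str):
        module = importlib.import_module(self._attr_to_module[attr])
        value = getattr(module, attr)
        setattr(self, attr, value) # cache so the real import happens only once
        return value
# e.g. LazyModuleSketch("m", {"sqrt": "math"}).sqrt(9) -> 3.0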
| 326 |
def lowerCAmelCase__( lowercase : List[Any] , lowercase : Optional[Any] , lowercase : Optional[int] , lowercase : str , lowercase : List[Any] , lowercase : List[str] ) -> int:
if index == r:
for j in range(lowercase ):
print(data[j] , end=" " )
print(" " )
return
# When no more elements are there to put in data[]
if i >= n:
return
# current is included, put next at next location
__snake_case : Union[str, Any] = arr[i]
combination_util(lowercase , lowercase , lowercase , index + 1 , lowercase , i + 1 )
# current is excluded, replace it with
# next (Note that i+1 is passed, but
# index is not changed)
combination_util(lowercase , lowercase , lowercase , lowercase , lowercase , i + 1 )
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def lowerCAmelCase__( lowercase : Any , lowercase : Tuple , lowercase : Union[str, Any] ) -> Optional[Any]:
# A temporary array to store all combinations one by one
__snake_case : Tuple = [0] * r
# Print all combinations using temporary array 'data[]'
combination_util(lowercase , lowercase , lowercase , 0 , lowercase , 0 )
if __name__ == "__main__":
# Driver code to check the function above
_UpperCamelCase = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
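# Hedged equivalence check against the standard library for the same driver input:
from itertools import combinations
for combo in combinations([10, 20, 30, 40, 50], 3):
    print(*combo)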
| 326 | 1 |
from __future__ import annotations
from scipy.special import comb # type: ignore
class _lowerCamelCase :
"""simple docstring"""
def __init__( self , UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : List[Any] = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
__snake_case : List[str] = len(UpperCAmelCase ) - 1
def UpperCAmelCase ( self , UpperCAmelCase ) -> list[float]:
'''simple docstring'''
assert 0 <= t <= 1, "Time t must be between 0 and 1."
__snake_case : list[float] = []
for i in range(len(self.list_of_points ) ):
# basis function for each i
output_values.append(
comb(self.degree , UpperCAmelCase ) * ((1 - t) ** (self.degree - i)) * (t**i) )
# the basis must sum up to 1 for it to produce a valid Bezier curve.
assert round(sum(UpperCAmelCase ) , 5 ) == 1
return output_values
def UpperCAmelCase ( self , UpperCAmelCase ) -> tuple[float, float]:
'''simple docstring'''
assert 0 <= t <= 1, "Time t must be between 0 and 1."
__snake_case : Optional[int] = self.basis_function(UpperCAmelCase )
__snake_case : List[str] = 0.0
__snake_case : List[str] = 0.0
for i in range(len(self.list_of_points ) ):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
def UpperCAmelCase ( self , UpperCAmelCase = 0.01 ) -> Tuple:
'''simple docstring'''
from matplotlib import pyplot as plt # type: ignore
__snake_case : list[float] = [] # x coordinates of points to plot
__snake_case : list[float] = [] # y coordinates of points to plot
__snake_case : Any = 0.0
while t <= 1:
__snake_case : Union[str, Any] = self.bezier_curve_function(UpperCAmelCase )
to_plot_x.append(value[0] )
to_plot_y.append(value[1] )
t += step_size
__snake_case : Union[str, Any] = [i[0] for i in self.list_of_points]
__snake_case : int = [i[1] for i in self.list_of_points]
plt.plot(
UpperCAmelCase , UpperCAmelCase , color="blue" , label="Curve of Degree " + str(self.degree ) , )
plt.scatter(UpperCAmelCase , UpperCAmelCase , color="red" , label="Control Points" )
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
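# Hedged quick check (using the BezierCurve name from the __main__ block above):
# a degree-1 curve between (0, 0) and (5, 5) is a straight line, so t = 0.5
# should land exactly on its midpoint.
curve = BezierCurve([(0, 0), (5, 5)])
assert curve.bezier_curve_function(0.5) == (2.5, 2.5)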
| 326 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = [
('''bert.bert''', '''visual_bert'''),
('''bert.cls''', '''cls'''),
('''bert.classifier''', '''cls'''),
('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''),
('''position_embeddings_visual''', '''visual_position_embeddings'''),
('''projection''', '''visual_projection'''),
]
_UpperCamelCase = [
'''nlvr2_coco_pre_trained.th''',
'''nlvr2_fine_tuned.th''',
'''nlvr2_pre_trained.th''',
'''vcr_coco_pre_train.th''',
'''vcr_fine_tune.th''',
'''vcr_pre_train.th''',
'''vqa_coco_pre_trained.th''',
'''vqa_fine_tuned.th''',
'''vqa_pre_trained.th''',
]
def lowerCAmelCase__( lowercase : str ) -> Optional[Any]:
__snake_case : Optional[int] = torch.load(lowercase , map_location="cpu" )
return sd
def lowerCAmelCase__( lowercase : List[Any] , lowercase : List[Any] , lowercase : List[Any]=rename_keys_prefix ) -> Dict:
__snake_case : Tuple = OrderedDict()
__snake_case : str = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
__snake_case : Optional[Any] = key
for name_pair in rename_keys_prefix:
__snake_case : List[str] = new_key.replace(name_pair[0] , name_pair[1] )
__snake_case : List[str] = d[key]
if key == "bert.cls.predictions.decoder.weight":
# The old BERT code didn't have `decoder.bias`; it was added separately
__snake_case : List[Any] = new_d["cls.predictions.bias"]
return new_d
@torch.no_grad()
def lowerCAmelCase__( lowercase : Optional[Any] , lowercase : Any ) -> List[Any]:
assert (
checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS
), f"""The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."""
# Get Config
if "pre" in checkpoint_path:
__snake_case : Any = "pretraining"
if "vcr" in checkpoint_path:
__snake_case : Optional[Any] = {"visual_embedding_dim": 512}
elif "vqa_advanced" in checkpoint_path:
__snake_case : Tuple = {"visual_embedding_dim": 2048}
elif "vqa" in checkpoint_path:
__snake_case : Dict = {"visual_embedding_dim": 2048}
elif "nlvr" in checkpoint_path:
__snake_case : Any = {"visual_embedding_dim": 1024}
else:
raise NotImplementedError(f"""No implementation found for `{checkpoint_path}`.""" )
else:
if "vcr" in checkpoint_path:
__snake_case : Dict = {"visual_embedding_dim": 512}
__snake_case : Any = "multichoice"
elif "vqa_advanced" in checkpoint_path:
__snake_case : List[Any] = {"visual_embedding_dim": 2048}
__snake_case : Optional[Any] = "vqa_advanced"
elif "vqa" in checkpoint_path:
__snake_case : Union[str, Any] = {"visual_embedding_dim": 2048, "num_labels": 3129}
__snake_case : Union[str, Any] = "vqa"
elif "nlvr" in checkpoint_path:
__snake_case : Tuple = {
"visual_embedding_dim": 1024,
"num_labels": 2,
}
__snake_case : List[Any] = "nlvr"
__snake_case : Union[str, Any] = VisualBertConfig(**lowercase )
# Load State Dict
__snake_case : Any = load_state_dict(lowercase )
__snake_case : Dict = get_new_dict(lowercase , lowercase )
if model_type == "pretraining":
__snake_case : Optional[Any] = VisualBertForPreTraining(lowercase )
elif model_type == "vqa":
__snake_case : Tuple = VisualBertForQuestionAnswering(lowercase )
elif model_type == "nlvr":
__snake_case : Tuple = VisualBertForVisualReasoning(lowercase )
elif model_type == "multichoice":
__snake_case : List[Any] = VisualBertForMultipleChoice(lowercase )
model.load_state_dict(lowercase )
# Save Checkpoints
Path(lowercase ).mkdir(exist_ok=lowercase )
model.save_pretrained(lowercase )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''')
_UpperCamelCase = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
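# Hedged toy illustration of the prefix-renaming pass performed by the
# get_new_dict-style loop above (keys and values are made up):
from collections import OrderedDict
renames = [("bert.bert", "visual_bert"), ("bert.cls", "cls")]
old = OrderedDict({"bert.bert.encoder.w": 1, "bert.cls.bias": 2})
new = OrderedDict()
for key, value in old.items():
    for src, dst in renames:
        key = key.replace(src, dst)
    new[key] = value
assert list(new) == ["visual_bert.encoder.w", "cls.bias"]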
| 326 | 1 |
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
__snake_case : Any = tempfile.mkdtemp()
__snake_case : str = SamImageProcessor()
__snake_case : int = SamProcessor(UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
def UpperCAmelCase ( self , **UpperCAmelCase ) -> Any:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase ).image_processor
def UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
__snake_case : List[str] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__snake_case : Union[str, Any] = [Image.fromarray(np.moveaxis(UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
__snake_case : List[Any] = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__snake_case : List[str] = self.get_image_processor(do_normalize=UpperCAmelCase , padding_value=1.0 )
__snake_case : int = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase )
def UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
__snake_case : Any = self.get_image_processor()
__snake_case : List[str] = SamProcessor(image_processor=UpperCAmelCase )
__snake_case : Tuple = self.prepare_image_inputs()
__snake_case : Tuple = image_processor(UpperCAmelCase , return_tensors="np" )
__snake_case : Any = processor(images=UpperCAmelCase , return_tensors="np" )
input_feat_extract.pop("original_sizes" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("reshaped_input_sizes" ) # pop original_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
@require_torch
def UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
__snake_case : str = self.get_image_processor()
__snake_case : List[str] = SamProcessor(image_processor=UpperCAmelCase )
__snake_case : int = [torch.ones((1, 3, 5, 5) )]
__snake_case : Dict = [[1764, 2646]]
__snake_case : Any = [[683, 1024]]
__snake_case : Union[str, Any] = processor.post_process_masks(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
__snake_case : List[str] = processor.post_process_masks(
UpperCAmelCase , torch.tensor(UpperCAmelCase ) , torch.tensor(UpperCAmelCase ) )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
# should also work with np
__snake_case : Optional[Any] = [np.ones((1, 3, 5, 5) )]
__snake_case : int = processor.post_process_masks(UpperCAmelCase , np.array(UpperCAmelCase ) , np.array(UpperCAmelCase ) )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
__snake_case : Dict = [[1, 0], [0, 1]]
with self.assertRaises(UpperCAmelCase ):
__snake_case : List[str] = processor.post_process_masks(UpperCAmelCase , np.array(UpperCAmelCase ) , np.array(UpperCAmelCase ) )
@require_vision
@require_tf
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case : Dict = tempfile.mkdtemp()
__snake_case : Union[str, Any] = SamImageProcessor()
__snake_case : Any = SamProcessor(UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
def UpperCAmelCase ( self , **UpperCAmelCase ) -> List[str]:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase ).image_processor
def UpperCAmelCase ( self ) -> str:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
__snake_case : Optional[int] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__snake_case : Union[str, Any] = [Image.fromarray(np.moveaxis(UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
__snake_case : Optional[Any] = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__snake_case : List[str] = self.get_image_processor(do_normalize=UpperCAmelCase , padding_value=1.0 )
__snake_case : Any = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase )
def UpperCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case : Dict = self.get_image_processor()
__snake_case : str = SamProcessor(image_processor=UpperCAmelCase )
__snake_case : Optional[Any] = self.prepare_image_inputs()
__snake_case : str = image_processor(UpperCAmelCase , return_tensors="np" )
__snake_case : str = processor(images=UpperCAmelCase , return_tensors="np" )
input_feat_extract.pop("original_sizes" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("reshaped_input_sizes" ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
@require_tf
def UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
__snake_case : List[str] = self.get_image_processor()
__snake_case : int = SamProcessor(image_processor=UpperCAmelCase )
__snake_case : str = [tf.ones((1, 3, 5, 5) )]
__snake_case : Any = [[1764, 2646]]
__snake_case : Optional[int] = [[683, 1024]]
__snake_case : str = processor.post_process_masks(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , return_tensors="tf" )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
__snake_case : int = processor.post_process_masks(
UpperCAmelCase , tf.convert_to_tensor(UpperCAmelCase ) , tf.convert_to_tensor(UpperCAmelCase ) , return_tensors="tf" , )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
# should also work with np
__snake_case : Union[str, Any] = [np.ones((1, 3, 5, 5) )]
__snake_case : str = processor.post_process_masks(
UpperCAmelCase , np.array(UpperCAmelCase ) , np.array(UpperCAmelCase ) , return_tensors="tf" )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
__snake_case : Tuple = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
__snake_case : Any = processor.post_process_masks(
UpperCAmelCase , np.array(UpperCAmelCase ) , np.array(UpperCAmelCase ) , return_tensors="tf" )
@require_vision
@require_torchvision
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
__snake_case : List[Any] = tempfile.mkdtemp()
__snake_case : Optional[Any] = SamImageProcessor()
__snake_case : int = SamProcessor(UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
def UpperCAmelCase ( self , **UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase ).image_processor
def UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
__snake_case : int = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__snake_case : List[str] = [Image.fromarray(np.moveaxis(UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
__snake_case : Optional[Any] = self.get_image_processor()
__snake_case : Union[str, Any] = SamProcessor(image_processor=UpperCAmelCase )
__snake_case : List[Any] = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa )
__snake_case : Tuple = [tf.convert_to_tensor(UpperCAmelCase )]
__snake_case : Dict = [torch.tensor(UpperCAmelCase )]
__snake_case : Optional[int] = [[1764, 2646]]
__snake_case : Dict = [[683, 1024]]
__snake_case : Optional[Any] = processor.post_process_masks(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , return_tensors="tf" )
__snake_case : Tuple = processor.post_process_masks(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , return_tensors="pt" )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
__snake_case : Optional[int] = self.get_image_processor()
__snake_case : Any = SamProcessor(image_processor=UpperCAmelCase )
__snake_case : Tuple = self.prepare_image_inputs()
__snake_case : Any = image_processor(UpperCAmelCase , return_tensors="pt" )["pixel_values"].numpy()
__snake_case : Any = processor(images=UpperCAmelCase , return_tensors="pt" )["pixel_values"].numpy()
__snake_case : Optional[Any] = image_processor(UpperCAmelCase , return_tensors="tf" )["pixel_values"].numpy()
__snake_case : Union[str, Any] = processor(images=UpperCAmelCase , return_tensors="tf" )["pixel_values"].numpy()
self.assertTrue(np.allclose(UpperCAmelCase , UpperCAmelCase ) )
self.assertTrue(np.allclose(UpperCAmelCase , UpperCAmelCase ) )
self.assertTrue(np.allclose(UpperCAmelCase , UpperCAmelCase ) )
| 326 |
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def lowerCAmelCase__( lowercase : Optional[int] , lowercase : Any , lowercase : Dict , lowercase : List[str] , lowercase : List[Any] ) -> Tuple:
# Load configuration defined in the metadata file
with open(lowercase ) as metadata_file:
__snake_case : int = json.load(lowercase )
__snake_case : Optional[int] = LukeConfig(use_entity_aware_attention=lowercase , **metadata["model_config"] )
# Load in the weights from the checkpoint_path
__snake_case : List[Any] = torch.load(lowercase , map_location="cpu" )["module"]
# Load the entity vocab file
__snake_case : Tuple = load_original_entity_vocab(lowercase )
# add an entry for [MASK2]
__snake_case : Optional[int] = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
__snake_case : Union[str, Any] = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
__snake_case : Optional[int] = AddedToken("<ent>" , lstrip=lowercase , rstrip=lowercase )
__snake_case : Any = AddedToken("<ent2>" , lstrip=lowercase , rstrip=lowercase )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(lowercase )
with open(os.path.join(lowercase , "tokenizer_config.json" ) , "r" ) as f:
__snake_case : Tuple = json.load(lowercase )
__snake_case : List[Any] = "MLukeTokenizer"
with open(os.path.join(lowercase , "tokenizer_config.json" ) , "w" ) as f:
json.dump(lowercase , lowercase )
with open(os.path.join(lowercase , MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f:
json.dump(lowercase , lowercase )
__snake_case : Any = MLukeTokenizer.from_pretrained(lowercase )
# Initialize the embeddings of the special tokens
__snake_case : str = tokenizer.convert_tokens_to_ids(["@"] )[0]
__snake_case : List[str] = tokenizer.convert_tokens_to_ids(["#"] )[0]
__snake_case : List[Any] = state_dict["embeddings.word_embeddings.weight"]
__snake_case : Union[str, Any] = word_emb[ent_init_index].unsqueeze(0 )
__snake_case : Union[str, Any] = word_emb[enta_init_index].unsqueeze(0 )
__snake_case : Union[str, Any] = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
__snake_case : List[Any] = state_dict[bias_name]
__snake_case : Optional[int] = decoder_bias[ent_init_index].unsqueeze(0 )
__snake_case : int = decoder_bias[enta_init_index].unsqueeze(0 )
__snake_case : Any = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
__snake_case : Dict = f"""encoder.layer.{layer_index}.attention.self."""
__snake_case : Union[str, Any] = state_dict[prefix + matrix_name]
__snake_case : str = state_dict[prefix + matrix_name]
__snake_case : Union[str, Any] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
__snake_case : Any = state_dict["entity_embeddings.entity_embeddings.weight"]
__snake_case : List[str] = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
__snake_case : Any = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
__snake_case : List[Any] = state_dict["entity_predictions.bias"]
__snake_case : List[Any] = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
__snake_case : Union[str, Any] = torch.cat([entity_prediction_bias, entity_mask_bias] )
__snake_case : Any = LukeForMaskedLM(config=lowercase ).eval()
state_dict.pop("entity_predictions.decoder.weight" )
state_dict.pop("lm_head.decoder.weight" )
state_dict.pop("lm_head.decoder.bias" )
__snake_case : int = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )):
__snake_case : str = state_dict[key] # non-head weights belong under the model's "luke." prefix
else:
__snake_case : str = state_dict[key]
__snake_case , __snake_case : Union[str, Any] = model.load_state_dict(lowercase , strict=lowercase )
if set(lowercase ) != {"luke.embeddings.position_ids"}:
raise ValueError(f"""Unexpected unexpected_keys: {unexpected_keys}""" )
if set(lowercase ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(f"""Unexpected missing_keys: {missing_keys}""" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
__snake_case : int = MLukeTokenizer.from_pretrained(lowercase , task="entity_classification" )
__snake_case : Tuple = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
__snake_case : Union[str, Any] = (0, 9)
__snake_case : Optional[int] = tokenizer(lowercase , entity_spans=[span] , return_tensors="pt" )
__snake_case : Any = model(**lowercase )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
__snake_case : Optional[Any] = torch.Size((1, 33, 768) )
__snake_case : Optional[int] = torch.tensor([[0.0_8_9_2, 0.0_5_9_6, -0.2_8_1_9], [0.0_1_3_4, 0.1_1_9_9, 0.0_5_7_3], [-0.0_1_6_9, 0.0_9_2_7, 0.0_6_4_4]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowercase , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
__snake_case : str = torch.Size((1, 1, 768) )
__snake_case : int = torch.tensor([[-0.1_4_8_2, 0.0_6_0_9, 0.0_3_2_2]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
f""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , lowercase , atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
__snake_case : str = MLukeTokenizer.from_pretrained(lowercase )
__snake_case : Dict = "Tokyo is the capital of <mask>."
__snake_case : Union[str, Any] = (24, 30)
__snake_case : int = tokenizer(lowercase , entity_spans=[span] , return_tensors="pt" )
__snake_case : int = model(**lowercase )
__snake_case : Dict = encoding["input_ids"][0].tolist()
__snake_case : Dict = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) )
__snake_case : Optional[int] = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(lowercase )
__snake_case : Optional[Any] = outputs.entity_logits[0][0].argmax().item()
__snake_case : Optional[int] = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(lowercase ) )
model.save_pretrained(lowercase )
def lowerCAmelCase__( lowercase : Optional[int] ) -> List[Any]:
__snake_case : Any = ["[MASK]", "[PAD]", "[UNK]"]
__snake_case : Any = [json.loads(line ) for line in open(lowercase )]
__snake_case : Any = {}
for entry in data:
__snake_case : Any = entry["id"]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
__snake_case : Optional[int] = entity_id
break
__snake_case : Union[str, Any] = f"""{language}:{entity_name}"""
__snake_case : Any = entity_id
return new_mapping
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
_UpperCamelCase = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 326 | 1 |
import csv
import tweepy
# Twitter API credentials
_UpperCamelCase = ''''''
_UpperCamelCase = ''''''
_UpperCamelCase = ''''''
_UpperCamelCase = ''''''
def lowerCAmelCase__( lowercase : str ) -> None:
# authorize twitter, initialize tweepy
__snake_case : str = tweepy.OAuthHandler(lowercase , lowercase )
auth.set_access_token(lowercase , lowercase )
__snake_case : int = tweepy.API(lowercase )
# initialize a list to hold all the tweepy Tweets
__snake_case : Any = []
# make initial request for most recent tweets (200 is the maximum allowed count)
__snake_case : int = api.user_timeline(screen_name=lowercase , count=200 )
# save most recent tweets
alltweets.extend(lowercase )
# save the id of the oldest tweet less one
__snake_case : Tuple = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(lowercase ) > 0:
print(f"""getting tweets before {oldest}""" )
# all subsequent requests use the max_id param to prevent duplicates
__snake_case : Any = api.user_timeline(
screen_name=lowercase , count=200 , max_id=lowercase )
# save most recent tweets
alltweets.extend(lowercase )
# update the id of the oldest tweet less one
__snake_case : Tuple = alltweets[-1].id - 1
print(f"""...{len(lowercase )} tweets downloaded so far""" )
# transform the tweepy tweets into a 2D array that will populate the csv
__snake_case : Tuple = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(f"""new_{screen_name}_tweets.csv""" , "w" ) as f:
__snake_case : Optional[int] = csv.writer(f )
writer.writerow(["id", "created_at", "text"] )
writer.writerows(lowercase )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('''FirePing32''')
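# Hedged alternative: tweepy's Cursor can drive the max_id pagination loop itself
# (the api object and credentials are assumed to exist as above).
def get_all_tweets_with_cursor(api: tweepy.API, screen_name: str) -> list:
    return [
        [tweet.id_str, tweet.created_at, tweet.text]
        for tweet in tweepy.Cursor(api.user_timeline, screen_name=screen_name, count=200).items()
    ]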
| 326 |
from maths.prime_factors import prime_factors
def lowerCAmelCase__( lowercase : int ) -> int:
if not isinstance(lowercase , lowercase ):
__snake_case : Optional[int] = f"""Input value of [number={number}] must be an integer"""
raise TypeError(lowercase )
if number < 1:
raise ValueError("Input must be a positive integer" )
return -1 if len(prime_factors(lowercase ) ) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
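# Hedged usage note: this computes the Liouville function, (-1) ** Omega(n), where
# Omega counts prime factors with multiplicity. Calling the name defined above:
assert lowerCAmelCase__(10) == 1 # 10 = 2 * 5 -> even count
assert lowerCAmelCase__(12) == -1 # 12 = 2 * 2 * 3 -> odd count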
| 326 | 1 |
import functools
def lowerCAmelCase__( lowercase : list[int] , lowercase : list[int] ) -> int:
# Validation
if not isinstance(lowercase , lowercase ) or not all(isinstance(lowercase , lowercase ) for day in days ):
raise ValueError("The parameter days should be a list of integers" )
if len(lowercase ) != 3 or not all(isinstance(lowercase , lowercase ) for cost in costs ):
raise ValueError("The parameter costs should be a list of three integers" )
if len(lowercase ) == 0:
return 0
if min(lowercase ) <= 0:
raise ValueError("All days elements should be greater than 0" )
if max(lowercase ) >= 366:
raise ValueError("All days elements should be less than 366" )
__snake_case : Optional[int] = set(lowercase )
@functools.cache
def dynamic_programming(lowercase : int ) -> int:
if index > 365:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
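# Hedged worked example (calling the function under its name above); this is the
# classic instance: a 7-day pass covers days 1-7, then 1-day tickets cover days
# 8 and 20, for a total of 7 + 2 + 2 = 11.
assert lowerCAmelCase__([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11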
| 326 |
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
__snake_case : Tuple = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small" )
__snake_case : str = AutoTokenizer.from_pretrained("google/mt5-small" )
__snake_case : List[Any] = tokenizer("Hello there" , return_tensors="np" ).input_ids
__snake_case : int = tokenizer("Hi I am" , return_tensors="np" ).input_ids
__snake_case : Tuple = shift_tokens_right(UpperCAmelCase , model.config.pad_token_id , model.config.decoder_start_token_id )
__snake_case : Tuple = model(UpperCAmelCase , decoder_input_ids=UpperCAmelCase ).logits
__snake_case : str = optax.softmax_cross_entropy(UpperCAmelCase , onehot(UpperCAmelCase , logits.shape[-1] ) ).mean()
__snake_case : Any = -(labels.shape[-1] * loss.item())
__snake_case : List[str] = -84.9_127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
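# Why the expected score is -(labels.shape[-1] * loss): optax's cross entropy
# is averaged per token, so multiplying by the label length recovers the
# summed negative log-likelihood. A tiny numpy sketch of that identity:
import numpy as np

token_nll = np.array([1.5, 0.5, 2.0])  # hypothetical per-token losses
assert np.isclose(-(token_nll.size * token_nll.mean()), -token_nll.sum())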
| 326 | 1 |
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def lowerCAmelCase__( ) -> Optional[int]:
__snake_case : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("--model_ckpt" , type=lowercase , default="microsoft/unixcoder-base-nine" )
parser.add_argument("--num_epochs" , type=lowercase , default=5 )
parser.add_argument("--batch_size" , type=lowercase , default=6 )
parser.add_argument("--gradient_accumulation_steps" , type=lowercase , default=1 )
parser.add_argument("--freeze" , type=lowercase , default=lowercase )
parser.add_argument("--learning_rate" , type=lowercase , default=5E-4 )
parser.add_argument("--seed" , type=lowercase , default=0 )
parser.add_argument("--lr_scheduler_type" , type=lowercase , default="cosine" )
parser.add_argument("--num_warmup_steps" , type=lowercase , default=10 )
parser.add_argument("--weight_decay" , type=lowercase , default=0.0_1 )
parser.add_argument("--output_dir" , type=lowercase , default="./results" )
return parser.parse_args()
_UpperCamelCase = load('''accuracy''')
def lowerCAmelCase__( lowercase : int ) -> List[Any]:
__snake_case , __snake_case : Dict = eval_pred
__snake_case : int = np.argmax(lowercase , axis=1 )
return metric.compute(predictions=lowercase , references=lowercase )
class _lowerCamelCase ( a ):
"""simple docstring"""
def __init__( self , UpperCAmelCase ) -> None:
'''simple docstring'''
super().__init__()
__snake_case : Tuple = trainer
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
if control.should_evaluate:
__snake_case : List[Any] = deepcopy(UpperCAmelCase )
self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix="train" )
return control_copy
def lowerCAmelCase__( ) -> List[Any]:
__snake_case : List[Any] = get_args()
set_seed(args.seed )
__snake_case : int = load_dataset("codeparrot/codecomplex" , split="train" )
__snake_case : int = dataset.train_test_split(test_size=0.2 )
__snake_case : Optional[Any] = train_test["test"].train_test_split(test_size=0.5 )
__snake_case : List[Any] = DatasetDict(
{
"train": train_test["train"],
"test": test_validation["train"],
"valid": test_validation["test"],
} )
print("Loading tokenizer and model" )
__snake_case : Dict = AutoTokenizer.from_pretrained(args.model_ckpt )
__snake_case : Tuple = tokenizer.eos_token
__snake_case : Optional[Any] = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 )
__snake_case : Union[str, Any] = model.config.eos_token_id
if args.freeze:
for param in model.roberta.parameters():
__snake_case : str = False
__snake_case : List[Any] = ClassLabel(num_classes=7 , names=list(set(train_test_validation["train"]["complexity"] ) ) )
def tokenize(lowercase : Union[str, Any] ):
__snake_case : int = tokenizer(example["src"] , truncation=lowercase , max_length=1024 )
__snake_case : int = labels.str2int(example["complexity"] )
return {
"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"label": label,
}
__snake_case : str = train_test_validation.map(
lowercase , batched=lowercase , remove_columns=train_test_validation["train"].column_names , )
__snake_case : int = DataCollatorWithPadding(tokenizer=lowercase )
__snake_case : Optional[int] = TrainingArguments(
output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy="epoch" , save_strategy="epoch" , logging_strategy="epoch" , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.0_1 , metric_for_best_model="accuracy" , run_name="complexity-java" , report_to="wandb" , )
__snake_case : int = Trainer(
model=lowercase , args=lowercase , train_dataset=tokenized_datasets["train"] , eval_dataset=tokenized_datasets["valid"] , tokenizer=lowercase , data_collator=lowercase , compute_metrics=lowercase , )
print("Training..." )
trainer.add_callback(CustomCallback(lowercase ) )
trainer.train()
if __name__ == "__main__":
main()
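# The metric function above in isolation: eval_pred carries (logits, labels),
# predictions are the argmax over the class axis, and accuracy is the
# exact-match rate. Hypothetical values:
import numpy as np

logits = np.array([[0.1, 2.0], [3.0, -1.0]])
labels_sketch = np.array([1, 0])
assert (np.argmax(logits, axis=1) == labels_sketch).mean() == 1.0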
| 326 |
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class _lowerCamelCase ( a ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase=768 ) -> List[str]:
'''simple docstring'''
super().__init__(UpperCAmelCase )
__snake_case : Optional[int] = proj_size
__snake_case : str = CLIPVisionModel(UpperCAmelCase )
__snake_case : Tuple = PaintByExampleMapper(UpperCAmelCase )
__snake_case : Union[str, Any] = nn.LayerNorm(config.hidden_size )
__snake_case : Optional[Any] = nn.Linear(config.hidden_size , self.proj_size )
# uncondition for scaling
__snake_case : Optional[int] = nn.Parameter(torch.randn((1, 1, self.proj_size) ) )
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase=False ) -> List[str]:
'''simple docstring'''
__snake_case : int = self.model(pixel_values=UpperCAmelCase )
__snake_case : Optional[int] = clip_output.pooler_output
__snake_case : Any = self.mapper(latent_states[:, None] )
__snake_case : Any = self.final_layer_norm(UpperCAmelCase )
__snake_case : str = self.proj_out(UpperCAmelCase )
if return_uncond_vector:
return latent_states, self.uncond_vector
return latent_states
class _lowerCamelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
super().__init__()
__snake_case : List[Any] = (config.num_hidden_layers + 1) // 5
__snake_case : Dict = config.hidden_size
__snake_case : str = 1
__snake_case : List[Any] = nn.ModuleList(
[
BasicTransformerBlock(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , activation_fn="gelu" , attention_bias=UpperCAmelCase )
for _ in range(UpperCAmelCase )
] )
def UpperCAmelCase ( self , UpperCAmelCase ) -> str:
'''simple docstring'''
for block in self.blocks:
__snake_case : int = block(UpperCAmelCase )
return hidden_states
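# Shape sketch for the mapper above (illustrative sizes, not checked against
# a real checkpoint): the pooled CLIP embedding (batch, hidden) gains a
# sequence axis of length 1 before the transformer blocks, then the final
# linear layer maps hidden_size to proj_size.
import torch

pooled = torch.randn(2, 768)  # stands in for clip_output.pooler_output
latent_states = pooled[:, None]  # same trick as latent_states[:, None] above
assert latent_states.shape == (2, 1, 768)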
| 326 | 1 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
_UpperCamelCase = ''''''
_UpperCamelCase = ''''''
_UpperCamelCase = ''''''
_UpperCamelCase = 1 # (0 is vertical, 1 is horizontal)
def lowerCAmelCase__( ) -> None:
__snake_case , __snake_case : Union[str, Any] = get_dataset(lowercase , lowercase )
print("Processing..." )
__snake_case , __snake_case , __snake_case : List[str] = update_image_and_anno(lowercase , lowercase , lowercase )
for index, image in enumerate(lowercase ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
__snake_case : Optional[int] = random_chars(32 )
__snake_case : List[Any] = paths[index].split(os.sep )[-1].rsplit("." , 1 )[0]
__snake_case : Tuple = f"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"""
cva.imwrite(f"""/{file_root}.jpg""" , lowercase , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(f"""Success {index+1}/{len(lowercase )} with {file_name}""" )
__snake_case : Optional[Any] = []
for anno in new_annos[index]:
__snake_case : List[str] = f"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"""
annos_list.append(lowercase )
with open(f"""/{file_root}.txt""" , "w" ) as outfile:
outfile.write("\n".join(line for line in annos_list ) )
def lowerCAmelCase__( lowercase : str , lowercase : str ) -> tuple[list, list]:
__snake_case : str = []
__snake_case : List[str] = []
for label_file in glob.glob(os.path.join(lowercase , "*.txt" ) ):
__snake_case : Any = label_file.split(os.sep )[-1].rsplit("." , 1 )[0]
with open(lowercase ) as in_file:
__snake_case : Dict = in_file.readlines()
__snake_case : int = os.path.join(lowercase , f"""{label_name}.jpg""" )
__snake_case : str = []
for obj_list in obj_lists:
__snake_case : Any = obj_list.rstrip("\n" ).split(" " )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(lowercase )
labels.append(lowercase )
return img_paths, labels
def lowerCAmelCase__( lowercase : list , lowercase : list , lowercase : int = 1 ) -> tuple[list, list, list]:
__snake_case : int = []
__snake_case : Tuple = []
__snake_case : Optional[Any] = []
for idx in range(len(lowercase ) ):
__snake_case : Dict = []
__snake_case : List[str] = img_list[idx]
path_list.append(lowercase )
__snake_case : List[str] = anno_list[idx]
__snake_case : List[str] = cva.imread(lowercase )
if flip_type == 1:
__snake_case : int = cva.flip(lowercase , lowercase )
for bbox in img_annos:
__snake_case : Union[str, Any] = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
__snake_case : Tuple = cva.flip(lowercase , lowercase )
for bbox in img_annos:
__snake_case : Tuple = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(lowercase )
new_imgs_list.append(lowercase )
return new_imgs_list, new_annos_lists, path_list
def lowerCAmelCase__( lowercase : int = 32 ) -> str:
assert number_char > 1, "The number of character should greater than 1"
__snake_case : List[Any] = ascii_lowercase + digits
return "".join(random.choice(lowercase ) for _ in range(lowercase ) )
if __name__ == "__main__":
main()
print('''DONE ✅''')
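# The YOLO-format flip rule used above, in isolation: annotations are
# (label, x_center, y_center, width, height) with coordinates normalised to
# [0, 1], so a horizontal flip maps x_center to 1 - x_center and a vertical
# flip maps y_center to 1 - y_center. Illustrative box:
box = [0, 0.25, 0.40, 0.10, 0.20]
h_flipped = [box[0], 1 - box[1], box[2], box[3], box[4]]
assert h_flipped == [0, 0.75, 0.40, 0.10, 0.20]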
| 326 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 326 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = '''▁'''
_UpperCamelCase = {'''vocab_file''': '''sentencepiece.bpe.model'''}
_UpperCamelCase = {
'''vocab_file''': {
'''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model''',
}
}
_UpperCamelCase = {
'''facebook/xglm-564M''': 2048,
}
class _lowerCamelCase ( a ):
"""simple docstring"""
UpperCAmelCase_ : int =VOCAB_FILES_NAMES
UpperCAmelCase_ : Optional[Any] =PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ : List[Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase_ : Dict =["input_ids", "attention_mask"]
def __init__( self , UpperCAmelCase , UpperCAmelCase="<s>" , UpperCAmelCase="</s>" , UpperCAmelCase="</s>" , UpperCAmelCase="<s>" , UpperCAmelCase="<unk>" , UpperCAmelCase="<pad>" , UpperCAmelCase = None , **UpperCAmelCase , ) -> None:
'''simple docstring'''
__snake_case : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
__snake_case : Optional[int] = 7
__snake_case : Any = [F"""<madeupword{i}>""" for i in range(self.num_madeup_words )]
__snake_case : str = kwargs.get("additional_special_tokens" , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , cls_token=UpperCAmelCase , pad_token=UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase , )
__snake_case : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCAmelCase ) )
__snake_case : Optional[Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
__snake_case : int = 1
# Mimic fairseq token-to-id alignment for the first 4 token
__snake_case : Optional[int] = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
__snake_case : int = len(self.sp_model )
__snake_case : Optional[int] = {F"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(UpperCAmelCase )
__snake_case : Optional[int] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> List[str]:
'''simple docstring'''
__snake_case : Dict = self.__dict__.copy()
__snake_case : Optional[Any] = None
__snake_case : Optional[Any] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , UpperCAmelCase ) -> Tuple:
'''simple docstring'''
__snake_case : str = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
__snake_case : Optional[Any] = {}
__snake_case : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
__snake_case : List[str] = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase , token_ids_a=UpperCAmelCase , already_has_special_tokens=UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase ))
return [1] + ([0] * len(UpperCAmelCase )) + [1, 1] + ([0] * len(UpperCAmelCase ))
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase = None ) -> List[int]:
'''simple docstring'''
__snake_case : int = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case : int = {self.convert_ids_to_tokens(UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCAmelCase ( self , UpperCAmelCase ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(UpperCAmelCase , out_type=UpperCAmelCase )
def UpperCAmelCase ( self , UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__snake_case : Dict = self.sp_model.PieceToId(UpperCAmelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def UpperCAmelCase ( self , UpperCAmelCase ) -> Any:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def UpperCAmelCase ( self , UpperCAmelCase ) -> str:
'''simple docstring'''
__snake_case : Any = "".join(UpperCAmelCase ).replace(UpperCAmelCase , " " ).strip()
return out_string
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(UpperCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
__snake_case : List[str] = os.path.join(
UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCAmelCase , "wb" ) as fi:
__snake_case : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase )
return (out_vocab_file,)
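# The fairseq/spm id alignment above, reduced to its core: spm ids are
# shifted by fairseq_offset so the four fairseq specials occupy ids 0-3, and
# spm id 0 (spm's own <unk>) maps to the fairseq unk id instead of the
# offset. Hypothetical values mirroring the token-to-id method above:
fairseq_offset, unk_token_id = 1, 3

def spm_to_fairseq(spm_id: int) -> int:
    return spm_id + fairseq_offset if spm_id else unk_token_id

assert spm_to_fairseq(0) == 3 and spm_to_fairseq(5) == 6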
| 326 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = torch.device('''cpu''')
def lowerCAmelCase__( ) -> Any:
__snake_case : List[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
__snake_case : Optional[int] = Image.open(requests.get(lowercase , stream=lowercase ).raw )
return im
def lowerCAmelCase__( lowercase : Dict ) -> List[Any]:
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1_703E00, 2.1_107E00, -2.0_811E00, 8.8_685E-01, 2.4_360E-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9_636E-01, 2.3_478E-01, -1.6_963E00, -1.7_381E00, -8.6_337E-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2_768E-01, -4.7_429E-01, -1.0_897E00, -1.0_248E00, 3.5_523E-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5_330E-01, 2.4_211E-01, -6.0_185E-01, -8.2_789E-01, -6.0_446E-02] )
def lowerCAmelCase__( lowercase : Tuple , lowercase : Union[str, Any] , lowercase : Union[str, Any] ) -> List[Any]:
__snake_case : List[Any] = dct.pop(lowercase )
__snake_case : List[Any] = val
def lowerCAmelCase__( lowercase : Union[str, Any] ) -> Tuple:
__snake_case : Optional[Any] = []
for k in state_dict.keys():
__snake_case : Union[str, Any] = k
if ".pwconv" in k:
__snake_case : Any = k_new.replace(".pwconv" , ".point_wise_conv" )
if ".dwconv" in k:
__snake_case : List[Any] = k_new.replace(".dwconv" , ".depth_wise_conv" )
if ".Proj." in k:
__snake_case : Optional[int] = k_new.replace(".Proj." , ".proj." )
if "patch_embed" in k_new:
__snake_case : int = k_new.replace("patch_embed" , "swiftformer.patch_embed.patch_embedding" )
if "network" in k_new:
__snake_case : int = k_new.split("." )
if ls[2].isdigit():
__snake_case : List[Any] = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:] )
else:
__snake_case : Optional[int] = k_new.replace("network" , "swiftformer.encoder.network" )
rename_keys.append((k, k_new) )
return rename_keys
@torch.no_grad()
def lowerCAmelCase__( lowercase : List[Any] , lowercase : Optional[Any] , lowercase : List[str] ) -> Union[str, Any]:
__snake_case : List[str] = SwiftFormerConfig()
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
__snake_case : Tuple = 1000
__snake_case : Any = "huggingface/label-files"
__snake_case : int = "imagenet-1k-id2label.json"
__snake_case : Dict = json.load(open(hf_hub_download(lowercase , lowercase , repo_type="dataset" ) , "r" ) )
__snake_case : str = {int(k ): v for k, v in idalabel.items()}
__snake_case : int = idalabel
__snake_case : Optional[int] = {v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
__snake_case : Optional[Any] = [3, 3, 6, 4]
__snake_case : Optional[int] = [48, 56, 112, 220]
elif swiftformer_name == "swiftformer_s":
__snake_case : List[str] = [3, 3, 9, 6]
__snake_case : Optional[Any] = [48, 64, 168, 224]
elif swiftformer_name == "swiftformer_l1":
__snake_case : Optional[int] = [4, 3, 10, 5]
__snake_case : Dict = [48, 96, 192, 384]
elif swiftformer_name == "swiftformer_l3":
__snake_case : str = [4, 4, 12, 6]
__snake_case : Optional[Any] = [64, 128, 320, 512]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith("https" ):
__snake_case : Optional[Any] = torch.hub.load_state_dict_from_url(lowercase , map_location="cpu" , check_hash=lowercase )
else:
__snake_case : Tuple = torch.load(lowercase , map_location="cpu" )
__snake_case : Optional[int] = checkpoint
__snake_case : Any = create_rename_keys(lowercase )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(lowercase , lowercase , lowercase )
# load HuggingFace model
__snake_case : Tuple = SwiftFormerForImageClassification(lowercase ).eval()
hf_model.load_state_dict(lowercase )
# prepare test inputs
__snake_case : Optional[Any] = prepare_img()
__snake_case : str = ViTImageProcessor.from_pretrained("preprocessor_config" )
__snake_case : Optional[int] = processor(images=lowercase , return_tensors="pt" )
# compare outputs from both models
__snake_case : str = get_expected_output(lowercase )
__snake_case : Optional[int] = hf_model(inputs["pixel_values"] ).logits
assert hf_logits.shape == torch.Size([1, 1000] )
assert torch.allclose(hf_logits[0, 0:5] , lowercase , atol=1E-3 )
Path(lowercase ).mkdir(exist_ok=lowercase )
print(f"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" )
hf_model.save_pretrained(lowercase )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swiftformer_name''',
default='''swiftformer_xs''',
choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''],
type=str,
help='''Name of the SwiftFormer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''./converted_outputs/''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''')
_UpperCamelCase = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
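# The rename mechanics above, reduced to their essence: pop the tensor out of
# the state dict under the old key and reinsert it under the new name
# (hypothetical key, for illustration only):
state_dict = {"network.0.weight": "tensor"}
old_key, new_key = "network.0.weight", "swiftformer.encoder.network.0.weight"
state_dict[new_key] = state_dict.pop(old_key)
assert list(state_dict) == [new_key]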
| 326 | 1 |
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _lowerCamelCase ( a , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ : Dict =VideoToVideoSDPipeline
UpperCAmelCase_ : Dict =TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"} ) - {"image", "width", "height"}
UpperCAmelCase_ : Dict =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"} ) - {"image"}
UpperCAmelCase_ : Tuple =PipelineTesterMixin.required_optional_params - {"latents"}
UpperCAmelCase_ : List[Any] =False
# No `output_type`.
UpperCAmelCase_ : Optional[int] =frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
] )
def UpperCAmelCase ( self ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
__snake_case : List[Any] = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") , up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") , cross_attention_dim=32 , attention_head_dim=4 , )
__snake_case : List[Any] = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=UpperCAmelCase , set_alpha_to_one=UpperCAmelCase , )
torch.manual_seed(0 )
__snake_case : Tuple = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
__snake_case : Any = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
__snake_case : Optional[int] = CLIPTextModel(UpperCAmelCase )
__snake_case : str = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
__snake_case : Optional[Any] = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase=0 ) -> str:
'''simple docstring'''
__snake_case : List[Any] = floats_tensor((1, 3, 3, 32, 32) , rng=random.Random(UpperCAmelCase ) ).to(UpperCAmelCase )
if str(UpperCAmelCase ).startswith("mps" ):
__snake_case : int = torch.manual_seed(UpperCAmelCase )
else:
__snake_case : List[Any] = torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase )
__snake_case : Optional[int] = {
"prompt": "A painting of a squirrel eating a burger",
"video": video,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "pt",
}
return inputs
def UpperCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case : int = "cpu" # ensure determinism for the device-dependent torch.Generator
__snake_case : Optional[int] = self.get_dummy_components()
__snake_case : List[str] = VideoToVideoSDPipeline(**UpperCAmelCase )
__snake_case : int = sd_pipe.to(UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase )
__snake_case : Union[str, Any] = self.get_dummy_inputs(UpperCAmelCase )
__snake_case : str = "np"
__snake_case : Dict = sd_pipe(**UpperCAmelCase ).frames
__snake_case : Optional[int] = frames[0][-3:, -3:, -1]
assert frames[0].shape == (32, 32, 3)
__snake_case : Tuple = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=UpperCAmelCase , expected_max_diff=5E-3 )
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def UpperCAmelCase ( self ) -> str:
'''simple docstring'''
pass
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." )
def UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
pass
def UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
return super().test_progress_bar()
@slow
@skip_mps
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case : List[Any] = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL" , torch_dtype=torch.floataa )
pipe.enable_model_cpu_offload()
# 10 frames
__snake_case : str = torch.Generator(device="cpu" ).manual_seed(0 )
__snake_case : str = torch.randn((1, 10, 3, 1024, 576) , generator=UpperCAmelCase )
__snake_case : Optional[Any] = video.to("cuda" )
__snake_case : Optional[Any] = "Spiderman is surfing"
__snake_case : Union[str, Any] = pipe(UpperCAmelCase , video=UpperCAmelCase , generator=UpperCAmelCase , num_inference_steps=3 , output_type="pt" ).frames
__snake_case : str = np.array([-1.0_458_984, -1.1_279_297, -0.9_663_086, -0.91_503_906, -0.75_097_656] )
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2
| 326 |
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
_UpperCamelCase = logging.getLogger(__name__)
def lowerCAmelCase__( lowercase : str ) -> List[str]:
__snake_case : int = git.Repo(search_parent_directories=lowercase )
__snake_case : Union[str, Any] = {
"repo_id": str(lowercase ),
"repo_sha": str(repo.head.object.hexsha ),
"repo_branch": str(repo.active_branch ),
}
with open(os.path.join(lowercase , "git_log.json" ) , "w" ) as f:
json.dump(lowercase , lowercase , indent=4 )
def lowerCAmelCase__( lowercase : Optional[Any] ) -> Optional[Any]:
if params.n_gpu <= 0:
__snake_case : Union[str, Any] = 0
__snake_case : Optional[int] = -1
__snake_case : Union[str, Any] = True
__snake_case : Tuple = False
return
assert torch.cuda.is_available()
logger.info("Initializing GPUs" )
if params.n_gpu > 1:
assert params.local_rank != -1
__snake_case : Optional[int] = int(os.environ["WORLD_SIZE"] )
__snake_case : int = int(os.environ["N_GPU_NODE"] )
__snake_case : Union[str, Any] = int(os.environ["RANK"] )
# number of nodes / node ID
__snake_case : Optional[Any] = params.world_size // params.n_gpu_per_node
__snake_case : Optional[Any] = params.global_rank // params.n_gpu_per_node
__snake_case : Union[str, Any] = True
assert params.n_nodes == int(os.environ["N_NODES"] )
assert params.node_id == int(os.environ["NODE_RANK"] )
# local job (single GPU)
else:
assert params.local_rank == -1
__snake_case : Any = 1
__snake_case : str = 0
__snake_case : Optional[Any] = 0
__snake_case : Dict = 0
__snake_case : int = 1
__snake_case : Optional[Any] = 1
__snake_case : Tuple = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
__snake_case : List[Any] = params.node_id == 0 and params.local_rank == 0
__snake_case : List[Any] = params.n_nodes > 1
# summary
__snake_case : List[Any] = f"""--- Global rank: {params.global_rank} - """
logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes )
logger.info(PREFIX + "Node ID : %i" % params.node_id )
logger.info(PREFIX + "Local rank : %i" % params.local_rank )
logger.info(PREFIX + "World size : %i" % params.world_size )
logger.info(PREFIX + "GPUs per node : %i" % params.n_gpu_per_node )
logger.info(PREFIX + "Master : %s" % str(params.is_master ) )
logger.info(PREFIX + "Multi-node : %s" % str(params.multi_node ) )
logger.info(PREFIX + "Multi-GPU : %s" % str(params.multi_gpu ) )
logger.info(PREFIX + "Hostname : %s" % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info("Initializing PyTorch distributed" )
torch.distributed.init_process_group(
init_method="env://" , backend="nccl" , )
def lowerCAmelCase__( lowercase : Dict ) -> Union[str, Any]:
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
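# The multi-node rank arithmetic above, as plain numbers (hypothetical
# topology; in the real code the ranks come from environment variables):
# 8 processes at 4 GPUs per node gives 2 nodes, and global rank 5 sits on
# node 1.
world_size, n_gpu_per_node, global_rank = 8, 4, 5
assert world_size // n_gpu_per_node == 2  # n_nodes
assert global_rank // n_gpu_per_node == 1  # node_id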
| 326 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_UpperCamelCase = {
'''configuration_mobilevit''': ['''MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MobileViTConfig''', '''MobileViTOnnxConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = ['''MobileViTFeatureExtractor''']
_UpperCamelCase = ['''MobileViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
'''MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileViTForImageClassification''',
'''MobileViTForSemanticSegmentation''',
'''MobileViTModel''',
'''MobileViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
'''TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileViTForImageClassification''',
'''TFMobileViTForSemanticSegmentation''',
'''TFMobileViTModel''',
'''TFMobileViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
_UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 326 |
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ : str =JukeboxTokenizer
UpperCAmelCase_ : Tuple ={
"artist": "Zac Brown Band",
"genres": "Country",
"lyrics": "I met a traveller from an antique land,\n Who said \"Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ",
}
@require_torch
def UpperCAmelCase ( self ) -> str:
'''simple docstring'''
import torch
__snake_case : List[str] = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics" )
__snake_case : Union[str, Any] = tokenizer(**self.metas )["input_ids"]
# fmt: off
__snake_case : Optional[Any] = [
torch.tensor([[
0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
def UpperCAmelCase ( self ) -> str:
'''simple docstring'''
import torch
__snake_case : Optional[Any] = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics" )
__snake_case : Tuple = tokenizer(**self.metas )["input_ids"]
# fmt: off
__snake_case : int = [
torch.tensor([[
0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 326 | 1 |
| 326 |
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
_UpperCamelCase = logging.get_logger(__name__)
class _lowerCamelCase :
"""simple docstring"""
UpperCAmelCase_ : str
UpperCAmelCase_ : str =None
@staticmethod
def UpperCAmelCase ( ) -> Optional[int]:
'''simple docstring'''
raise NotImplementedError
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> List[str]:
'''simple docstring'''
raise NotImplementedError
def UpperCAmelCase ( self , UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
raise NotImplementedError
def UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
if not self.is_available():
raise RuntimeError(
F"""You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.""" )
@classmethod
def UpperCAmelCase ( cls ) -> Tuple:
'''simple docstring'''
return F"""`pip install {cls.pip_package or cls.name}`"""
class _lowerCamelCase ( a ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] ="optuna"
@staticmethod
def UpperCAmelCase ( ) -> Union[str, Any]:
'''simple docstring'''
return is_optuna_available()
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> Dict:
'''simple docstring'''
return run_hp_search_optuna(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
def UpperCAmelCase ( self , UpperCAmelCase ) -> int:
'''simple docstring'''
return default_hp_space_optuna(UpperCAmelCase )
class _lowerCamelCase ( a ):
"""simple docstring"""
UpperCAmelCase_ : List[str] ="ray"
UpperCAmelCase_ : Dict ="'ray[tune]'"
@staticmethod
def UpperCAmelCase ( ) -> str:
'''simple docstring'''
return is_ray_available()
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
return run_hp_search_ray(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
def UpperCAmelCase ( self , UpperCAmelCase ) -> str:
'''simple docstring'''
return default_hp_space_ray(UpperCAmelCase )
class _lowerCamelCase ( a ):
"""simple docstring"""
UpperCAmelCase_ : Tuple ="sigopt"
@staticmethod
def UpperCAmelCase ( ) -> int:
'''simple docstring'''
return is_sigopt_available()
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
return run_hp_search_sigopt(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
def UpperCAmelCase ( self , UpperCAmelCase ) -> Dict:
'''simple docstring'''
return default_hp_space_sigopt(UpperCAmelCase )
class _lowerCamelCase ( a ):
"""simple docstring"""
UpperCAmelCase_ : str ="wandb"
@staticmethod
def UpperCAmelCase ( ) -> Optional[Any]:
'''simple docstring'''
return is_wandb_available()
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
return run_hp_search_wandb(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
def UpperCAmelCase ( self , UpperCAmelCase ) -> List[str]:
'''simple docstring'''
return default_hp_space_wandb(UpperCAmelCase )
_UpperCamelCase = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def lowerCAmelCase__( ) -> str:
__snake_case : Optional[int] = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
if len(lowercase ) > 0:
__snake_case : Dict = available_backends[0].name
if len(lowercase ) > 1:
logger.info(
f"""{len(lowercase )} hyperparameter search backends available. Using {name} as the default.""" )
return name
raise RuntimeError(
"No hyperparameter search backend available.\n"
+ "\n".join(
f""" - To install {backend.name} run {backend.pip_install()}"""
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
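# The selection rule above in isolation: keep the backends that report
# themselves available and take the first (stand-in stubs, not the real
# backend classes):
class _StubBackend:
    def __init__(self, name, available):
        self.name, self._available = name, available

    def is_available(self):
        return self._available

stubs = [_StubBackend("optuna", False), _StubBackend("ray", True), _StubBackend("wandb", True)]
available = [b for b in stubs if b.is_available()]
assert available[0].name == "ray"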
| 326 | 1 |
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) )
for a, b in zip(UpperCAmelCase , UpperCAmelCase ):
self.assertAlmostEqual(UpperCAmelCase , UpperCAmelCase , delta=UpperCAmelCase )
def UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : Optional[Any] = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
with self.assertRaises(UpperCAmelCase ):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step , 3 )
self.assertEqual(len(accumulator.gradients ) , 1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1E-2 )
    def testGradientAccumulatorDistributionStrategy(self ):
        '''simple docstring'''
        context._context = None  # reset the eager context before re-partitioning CPU devices
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices("CPU" )
        if len(physical_devices ) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
        devices = tf.config.list_logical_devices(device_type="CPU" )
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2] )
        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0] )
            optimizer , _ = create_optimizer(5E-5 , 10 , 5 )  # returns (optimizer, lr_schedule)
            gradient_placeholder = tf.Variable([0.0, 0.0] , trainable=False )
        def accumulate_on_replica(gradient ):
            accumulator([gradient] )
        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )
        @tf.function
        def accumulate(grad1 , grad2 ):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder )
                local_variables[0].assign(grad1 )
                local_variables[1].assign(grad2 )
                strategy.run(accumulate_on_replica , args=(gradient_placeholder,) )
        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica )
        def _check_local_values(grad1 , grad2 ):
            values = strategy.experimental_local_results(accumulator._gradients[0] )
            self.assertListAlmostEqual(values[0].value() , grad1 , tol=1E-2 )
            self.assertListAlmostEqual(values[1].value() , grad2 , tol=1E-2 )
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] )
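# Standalone usage sketch (same public APIs the tests above exercise):
# accumulate per-micro-batch gradients, apply them once, then reset.
def _gradient_accumulator_demo():
    accumulator = GradientAccumulator()
    variable = tf.Variable([1.0, 2.0])
    optimizer, _ = create_optimizer(5E-5, num_train_steps=10, num_warmup_steps=5)
    for grad in ([0.1, 0.1], [0.3, -0.1]):
        accumulator([tf.constant(grad)])  # gradients are summed across calls
    optimizer.apply_gradients(zip(accumulator.gradients, [variable]))
    accumulator.reset()  # clear the accumulated gradients for the next step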
| 326 |
import math
def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array
def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index], array[largest] = array[largest], array[index]  # swap so the larger value bubbles up
        heapify(array, largest, heap_size)
def heap_sort(array: list) -> list:
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]  # move the current max to its final position
        heapify(array, 0, i)
    return array
def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]
def partition(array: list, low: int, high: int, pivot: int) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]  # swap elements sitting on the wrong side of the pivot
        i += 1
def sort(array: list) -> list:
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)
def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)
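def _intro_sort_demo() -> None:
    # Sanity sketch: introsort must agree with the built-in sorted(), and its
    # quicksort recursion is capped at 2 * ceil(log2(n)) before heapsort takes over.
    data = [4.0, 1.0, 7.0, 3.0, 9.0, 3.0, 0.0]
    assert sort(list(data)) == sorted(data)
    assert 2 * math.ceil(math.log2(1000)) == 20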
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('''Enter numbers separated by a comma : ''').strip()
    unsorted = [float(item) for item in user_input.split(''',''')]
print(sort(unsorted))
| 326 | 1 |
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ['''text''', '''image''', '''audio''']
def create_inputs(input_types ):
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input" )
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png" ).resize((512, 512) ) )
        elif input_type == "audio":
            inputs.append(torch.ones(3000 ) )
        elif isinstance(input_type , list ):
            inputs.append(create_inputs(input_type ) )
        else:
            raise ValueError(f"""Invalid type requested: {input_type}""" )
    return inputs
def output_types(outputs ):
    output_types = []
    for output in outputs:
        if isinstance(output , (str, AgentText) ):
            output_types.append("text" )
        elif isinstance(output , (Image.Image, AgentImage) ):
            output_types.append("image" )
        elif isinstance(output , (torch.Tensor, AgentAudio) ):
            output_types.append("audio" )
        else:
            raise ValueError(f"""Invalid output: {output}""" )
    return output_types
@is_tool_test
class _lowerCamelCase :
"""simple docstring"""
def UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
self.assertTrue(hasattr(self.tool , "inputs" ) )
self.assertTrue(hasattr(self.tool , "outputs" ) )
__snake_case : List[Any] = self.tool.inputs
for _input in inputs:
if isinstance(_input , UpperCAmelCase ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
__snake_case : Union[str, Any] = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case : Optional[int] = create_inputs(self.tool.inputs )
__snake_case : Optional[Any] = self.tool(*UpperCAmelCase )
# There is a single output
if len(self.tool.outputs ) == 1:
__snake_case : Any = [outputs]
self.assertListEqual(output_types(UpperCAmelCase ) , self.tool.outputs )
def UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
self.assertTrue(hasattr(self.tool , "description" ) )
self.assertTrue(hasattr(self.tool , "default_checkpoint" ) )
self.assertTrue(self.tool.description.startswith("This is a tool that" ) )
def UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
__snake_case : Optional[int] = create_inputs(self.tool.inputs )
__snake_case : List[Any] = self.tool(*UpperCAmelCase )
if not isinstance(UpperCAmelCase , UpperCAmelCase ):
__snake_case : List[str] = [outputs]
self.assertEqual(len(UpperCAmelCase ) , len(self.tool.outputs ) )
for output, output_type in zip(UpperCAmelCase , self.tool.outputs ):
__snake_case : Tuple = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(UpperCAmelCase , UpperCAmelCase ) )
def UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
__snake_case : Union[str, Any] = create_inputs(self.tool.inputs )
__snake_case : str = []
for _input, input_type in zip(UpperCAmelCase , self.tool.inputs ):
if isinstance(UpperCAmelCase , UpperCAmelCase ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
__snake_case : Optional[Any] = self.tool(*UpperCAmelCase )
if not isinstance(UpperCAmelCase , UpperCAmelCase ):
__snake_case : int = [outputs]
self.assertEqual(len(UpperCAmelCase ) , len(self.tool.outputs ) )
| 326 |
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def lowerCAmelCase__( lowercase : Dict ) -> str: # picklable for multiprocessing
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def lowerCAmelCase__( ) -> List[Any]:
with parallel_backend("spark" ):
assert ParallelBackendConfig.backend_name == "spark"
__snake_case : Any = [1, 2, 3]
with pytest.raises(lowercase ):
with parallel_backend("unsupported backend" ):
map_nested(lowercase , lowercase , num_proc=2 )
with pytest.raises(lowercase ):
with parallel_backend("unsupported backend" ):
map_nested(lowercase , lowercase , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc" , [2, -1] )
def lowerCAmelCase__( lowercase : Dict ) -> Dict:
__snake_case : Any = [1, 2]
__snake_case : Dict = {"a": 1, "b": 2}
__snake_case : Optional[int] = {"a": [1, 2], "b": [3, 4]}
__snake_case : int = {"a": {"1": 1}, "b": 2}
__snake_case : str = {"a": 1, "b": 2, "c": 3, "d": 4}
__snake_case : Dict = [2, 3]
__snake_case : Tuple = {"a": 2, "b": 3}
__snake_case : int = {"a": [2, 3], "b": [4, 5]}
__snake_case : Dict = {"a": {"1": 2}, "b": 3}
__snake_case : str = {"a": 2, "b": 3, "c": 4, "d": 5}
with parallel_backend("spark" ):
assert map_nested(lowercase , lowercase , num_proc=lowercase ) == expected_map_nested_sa
assert map_nested(lowercase , lowercase , num_proc=lowercase ) == expected_map_nested_sa
assert map_nested(lowercase , lowercase , num_proc=lowercase ) == expected_map_nested_sa
assert map_nested(lowercase , lowercase , num_proc=lowercase ) == expected_map_nested_sa
assert map_nested(lowercase , lowercase , num_proc=lowercase ) == expected_map_nested_sa
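def _map_nested_demo() -> None:
    # Single-process usage sketch (no Spark backend needed): map_nested applies
    # a function to every leaf of a nested structure.
    assert map_nested(lambda i: i + 1, {"a": [1, 2], "b": 3}) == {"a": [2, 3], "b": 4}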
| 326 | 1 |
alphabet_size = 256
# Modulus to hash a string
modulus = 100_0003
def rabin_karp(pattern: str, text: str) -> bool:
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
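def _rolling_hash_demo() -> None:
    # Rolling-hash sketch mirroring the update above: drop the leading character,
    # shift, and append the next one to re-hash a window in O(1).
    def window_hash(s: str) -> int:
        h = 0
        for ch in s:
            h = (ord(ch) + h * alphabet_size) % modulus
        return h

    text, p_len = "xabcy", 3
    power = pow(alphabet_size, p_len - 1, modulus)
    h = window_hash(text[:p_len])
    h = ((h - ord(text[0]) * power) * alphabet_size + ord(text[p_len])) % modulus
    assert h == window_hash(text[1 : 1 + p_len])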
def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)
    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success." )
if __name__ == "__main__":
test_rabin_karp()
| 326 |
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state
    if visualization:
        from matplotlib import pyplot as plt
        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations" )
        plt.ylabel("Function values" )
        plt.show()
    return best_state
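def _acceptance_probability_demo() -> None:
    # Cooling sketch mirroring the inline formula above: a worsening move
    # (change < 0) is accepted with probability e**(change / T), which decays
    # as the temperature drops.
    change = -1.0
    assert math.e ** (change / 100) > 0.99  # hot: almost always accepted
    assert math.e ** (change / 1) < 0.37  # cool: accepted about a third of the time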
if __name__ == "__main__":
    def test_fa(x, y):
        return (x**2) + (y**2)
    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(
    prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
    '''The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
    F'''and 50 > y > - 5 found via simulated annealing: {local_min.score()}'''
    )
    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_max = simulated_annealing(
    prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
    '''The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
    F'''and 50 > y > - 5 found via simulated annealing: {local_max.score()}'''
    )
    def test_fa(x, y):
        return (3 * x**2) - (6 * y)
    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
    '''The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: '''
    F'''{local_min.score()}'''
    )
    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
    '''The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: '''
    F'''{local_max.score()}'''
    )
| 326 | 1 |
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_UpperCamelCase = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_sentencepiece_available():
import sentencepiece as sp
FR_CODE = 5
ES_CODE = 10
@require_sentencepiece
@require_tokenizers
class _lowerCamelCase ( a , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ : str =SpeechaTextTokenizer
UpperCAmelCase_ : Optional[int] =False
UpperCAmelCase_ : Union[str, Any] =True
def UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
super().setUp()
__snake_case : Optional[int] = sp.SentencePieceProcessor()
spm_model.Load(UpperCAmelCase )
__snake_case : Union[str, Any] = ["<s>", "<pad>", "</s>", "<unk>"]
vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(UpperCAmelCase ) )]
__snake_case : Union[str, Any] = dict(zip(UpperCAmelCase , range(len(UpperCAmelCase ) ) ) )
__snake_case : Optional[int] = Path(self.tmpdirname )
save_json(UpperCAmelCase , save_dir / VOCAB_FILES_NAMES["vocab_file"] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(UpperCAmelCase , save_dir / VOCAB_FILES_NAMES["spm_file"] )
__snake_case : int = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase ( self ) -> str:
'''simple docstring'''
__snake_case : Optional[Any] = "<pad>"
__snake_case : Tuple = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase ) , UpperCAmelCase )
def UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
__snake_case : List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(UpperCAmelCase ) , 1001 )
def UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1001 )
def UpperCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case : Union[str, Any] = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
__snake_case : str = tokenizer.tokenize("This is a test" )
self.assertListEqual(UpperCAmelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [289, 50, 14, 174, 386] , )
__snake_case : Optional[int] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
UpperCAmelCase , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."] , )
__snake_case : Dict = tokenizer.convert_tokens_to_ids(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8] )
__snake_case : str = tokenizer.convert_ids_to_tokens(UpperCAmelCase )
self.assertListEqual(
UpperCAmelCase , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."] , )
@slow
def UpperCAmelCase ( self ) -> str:
'''simple docstring'''
__snake_case : Optional[Any] = {"input_ids": [[3791, 797, 31, 11, 64, 797, 31, 2429, 433, 12, 1176, 12, 20, 786, 915, 142, 2413, 240, 37, 3238, 797, 31, 11, 35, 93, 915, 142, 2413, 240, 37, 5540, 567, 1276, 93, 37, 610, 40, 62, 455, 657, 1042, 123, 780, 177, 37, 309, 241, 1298, 514, 20, 292, 2737, 114, 2469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3388, 511, 459, 4, 3555, 40, 321, 302, 705, 4, 3388, 511, 583, 326, 5, 5, 5, 62, 3310, 560, 177, 2680, 217, 1508, 32, 31, 853, 418, 64, 583, 511, 1605, 62, 35, 93, 560, 177, 2680, 217, 1508, 1521, 64, 583, 511, 519, 62, 20, 1515, 764, 20, 149, 261, 5625, 7972, 20, 5540, 567, 1276, 93, 3925, 1675, 11, 15, 802, 7972, 576, 217, 1508, 11, 35, 93, 1253, 2441, 15, 289, 652, 31, 416, 321, 3842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2681, 1153, 3434, 20, 5540, 37, 567, 126, 1253, 2441, 3376, 449, 210, 431, 1563, 177, 767, 5540, 11, 1203, 472, 11, 2953, 685, 285, 364, 706, 1153, 20, 6799, 20, 2869, 20, 4464, 126, 40, 2429, 20, 1040, 866, 2664, 418, 20, 318, 20, 1726, 186, 20, 265, 522, 35, 93, 2191, 4634, 20, 1040, 12, 6799, 15, 228, 2356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2575, 2666, 684, 1582, 1176, 12, 627, 149, 619, 20, 4902, 563, 11, 20, 149, 261, 3420, 2356, 174, 142, 4714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase , model_name="facebook/s2t-small-mustc-en-de-st" , revision="a14f04cf0776c02f62a8cb800cf7909e15ea23ad" , )
@require_sentencepiece
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] ="valhalla/s2t_mustc_multilinguial_medium"
UpperCAmelCase_ : Dict ="C'est trop cool"
UpperCAmelCase_ : int ="Esto es genial"
@classmethod
def UpperCAmelCase ( cls ) -> List[str]:
'''simple docstring'''
__snake_case : SpeechaTextTokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name )
return cls
def UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.lang_code_to_id["pt"] , 4 )
self.assertEqual(self.tokenizer.lang_code_to_id["ru"] , 6 )
self.assertEqual(self.tokenizer.lang_code_to_id["it"] , 9 )
self.assertEqual(self.tokenizer.lang_code_to_id["de"] , 11 )
def UpperCAmelCase ( self ) -> int:
'''simple docstring'''
self.assertEqual(self.tokenizer.vocab_size , 10000 )
def UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
self.assertIn(UpperCAmelCase , self.tokenizer.all_special_ids )
__snake_case : List[Any] = [ES_CODE, 4, 1601, 47, 7647, 2]
__snake_case : int = self.tokenizer.decode(UpperCAmelCase , skip_special_tokens=UpperCAmelCase )
__snake_case : Union[str, Any] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCAmelCase )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
self.assertNotIn(self.tokenizer.eos_token , UpperCAmelCase )
def UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case : int = "fr"
__snake_case : Tuple = self.tokenizer(self.french_text ).input_ids
self.assertEqual(encoded[0] , UpperCAmelCase )
self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id )
def UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case : Tuple = "fr"
self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] )
__snake_case : str = "es"
self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
| 326 |
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _lowerCamelCase ( a ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] =["image_processor", "tokenizer"]
UpperCAmelCase_ : Tuple ="FlavaImageProcessor"
UpperCAmelCase_ : List[Any] =("BertTokenizer", "BertTokenizerFast")
def __init__( self , UpperCAmelCase=None , UpperCAmelCase=None , **UpperCAmelCase ) -> int:
'''simple docstring'''
__snake_case : List[Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , UpperCAmelCase , )
__snake_case : List[Any] = kwargs.pop("feature_extractor" )
__snake_case : Any = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(UpperCAmelCase , UpperCAmelCase )
__snake_case : Tuple = self.image_processor
def __call__( self , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = True , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = None , UpperCAmelCase = 0 , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = True , UpperCAmelCase = None , **UpperCAmelCase , ) -> List[Any]:
'''simple docstring'''
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
__snake_case : Union[str, Any] = self.tokenizer(
text=UpperCAmelCase , add_special_tokens=UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=UpperCAmelCase , stride=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_overflowing_tokens=UpperCAmelCase , return_special_tokens_mask=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , return_length=UpperCAmelCase , verbose=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase , )
if images is not None:
__snake_case : Union[str, Any] = self.image_processor(
UpperCAmelCase , return_image_mask=UpperCAmelCase , return_codebook_pixels=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase , )
if text is not None and images is not None:
encoding.update(UpperCAmelCase )
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCAmelCase ) , tensor_type=UpperCAmelCase )
def UpperCAmelCase ( self , *UpperCAmelCase , **UpperCAmelCase ) -> str:
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCAmelCase , **UpperCAmelCase )
def UpperCAmelCase ( self , *UpperCAmelCase , **UpperCAmelCase ) -> Tuple:
'''simple docstring'''
return self.tokenizer.decode(*UpperCAmelCase , **UpperCAmelCase )
@property
def UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case : List[Any] = self.tokenizer.model_input_names
__snake_case : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , UpperCAmelCase , )
return self.image_processor_class
@property
def UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , UpperCAmelCase , )
return self.image_processor
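# Usage sketch (assumes the public "facebook/flava-full" checkpoint):
#   processor = FlavaProcessor.from_pretrained("facebook/flava-full")
#   batch = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
# Text-only and image-only calls are also supported, matching the branches in
# __call__ above.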
| 326 | 1 |
def solution(n: int = 100) -> int:
    collect_powers = set()
    current_pow = 0
    n = n + 1  # maximum limit
    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)
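def _distinct_powers_demo() -> None:
    # Worked check (the small case from Project Euler 29): for 2 <= a, b <= 5
    # there are 15 distinct values of a**b, since 2**4 == 4**2 == 16 collide.
    assert solution(5) == 15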
if __name__ == "__main__":
print('''Number of terms ''', solution(int(str(input()).strip())))
| 326 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''camembert-base''': 512,
}
_UpperCamelCase = '''▁'''
class _lowerCamelCase ( a ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] =VOCAB_FILES_NAMES
UpperCAmelCase_ : str =PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ : int =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase_ : str =["input_ids", "attention_mask"]
def __init__( self , UpperCAmelCase , UpperCAmelCase="<s>" , UpperCAmelCase="</s>" , UpperCAmelCase="</s>" , UpperCAmelCase="<s>" , UpperCAmelCase="<unk>" , UpperCAmelCase="<pad>" , UpperCAmelCase="<mask>" , UpperCAmelCase=["<s>NOTUSED", "</s>NOTUSED"] , UpperCAmelCase = None , **UpperCAmelCase , ) -> None:
'''simple docstring'''
__snake_case : Dict = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else mask_token
__snake_case : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , cls_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , additional_special_tokens=UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase , )
__snake_case : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCAmelCase ) )
__snake_case : Dict = vocab_file
# HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>)
__snake_case : str = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
__snake_case : Optional[int] = len(self.fairseq_tokens_to_ids )
__snake_case : Any = len(self.sp_model ) + len(self.fairseq_tokens_to_ids )
__snake_case : List[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__snake_case : Dict = [self.cls_token_id]
__snake_case : Any = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase , token_ids_a=UpperCAmelCase , already_has_special_tokens=UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase )) + [1]
return [1] + ([0] * len(UpperCAmelCase )) + [1, 1] + ([0] * len(UpperCAmelCase )) + [1]
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase = None ) -> List[int]:
'''simple docstring'''
__snake_case : int = [self.sep_token_id]
__snake_case : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def UpperCAmelCase ( self ) -> int:
'''simple docstring'''
return len(self.fairseq_tokens_to_ids ) + len(self.sp_model )
def UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
__snake_case : Optional[int] = {self.convert_ids_to_tokens(UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCAmelCase ( self , UpperCAmelCase ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(UpperCAmelCase , out_type=UpperCAmelCase )
def UpperCAmelCase ( self , UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
elif self.sp_model.PieceToId(UpperCAmelCase ) == 0:
# Convert sentence piece unk token to fairseq unk token index
return self.unk_token_id
return self.fairseq_offset + self.sp_model.PieceToId(UpperCAmelCase )
def UpperCAmelCase ( self , UpperCAmelCase ) -> Tuple:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def UpperCAmelCase ( self , UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
__snake_case : Tuple = []
__snake_case : Union[str, Any] = ""
__snake_case : Optional[int] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(UpperCAmelCase ) + token
__snake_case : List[Any] = True
__snake_case : Union[str, Any] = []
else:
current_sub_tokens.append(UpperCAmelCase )
__snake_case : int = False
out_string += self.sp_model.decode(UpperCAmelCase )
return out_string.strip()
def __getstate__( self ) -> List[Any]:
'''simple docstring'''
__snake_case : str = self.__dict__.copy()
__snake_case : Optional[Any] = None
return state
def __setstate__( self , UpperCAmelCase ) -> str:
'''simple docstring'''
__snake_case : Optional[Any] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
__snake_case : List[str] = {}
__snake_case : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(UpperCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
__snake_case : Optional[Any] = os.path.join(
UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCAmelCase , "wb" ) as fi:
__snake_case : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase )
return (out_vocab_file,)
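# Usage sketch (uses the "camembert-base" checkpoint referenced in
# PRETRAINED_VOCAB_FILES_MAP above):
#   tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
#   ids = tokenizer("J'aime le camembert !").input_ids
# Single sequences are wrapped as <s> ... </s>; pairs as <s> A </s></s> B </s>,
# matching the special-token logic above.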
| 326 | 1 |
from __future__ import annotations
def carrier_concentration( electron_conc : float , hole_conc : float , intrinsic_conc : float , ) -> tuple:  # name assumed from the upstream source; the original identifier was obfuscated
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor" )
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor" )
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
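def _carrier_concentration_demo() -> None:
    # Worked check for the function above: with two concentrations known, the
    # third follows from the mass-action law n * p = n_i**2.
    assert carrier_concentration(electron_conc=25, hole_conc=100, intrinsic_conc=0) == (
        "intrinsic_conc",
        50.0,
    )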
if __name__ == "__main__":
import doctest
doctest.testmod()
| 326 |
def is_sum_subset(arr: list[int], required_sum: int) -> bool:  # name assumed; the original identifier was obfuscated
    arr_len = len(arr)
    # subset[i][j] is True when some subset of the first i elements sums to j
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]
    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True
    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False
    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
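def _is_sum_subset_demo() -> None:
    # Worked check for the DP above: [3, 34, 4, 12, 5, 2] reaches 9 (4 + 5),
    # but no subset reaches 30 (34 alone overshoots; the rest sum to only 26).
    assert is_sum_subset([3, 34, 4, 12, 5, 2], 9) is True
    assert is_sum_subset([3, 34, 4, 12, 5, 2], 30) is False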
if __name__ == "__main__":
import doctest
doctest.testmod()
| 326 | 1 |
class _lowerCamelCase :
"""simple docstring"""
def __init__( self , UpperCAmelCase ) -> None:
'''simple docstring'''
__snake_case : Optional[int] = len(UpperCAmelCase )
__snake_case : Any = [0] * len_array
if len_array > 0:
__snake_case : Tuple = array[0]
for i in range(1 , UpperCAmelCase ):
__snake_case : Optional[Any] = self.prefix_sum[i - 1] + array[i]
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase ) -> int:
'''simple docstring'''
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
def UpperCAmelCase ( self , UpperCAmelCase ) -> bool:
'''simple docstring'''
__snake_case : Optional[Any] = {0}
for sum_item in self.prefix_sum:
if sum_item - target_sum in sums:
return True
sums.add(UpperCAmelCase )
return False
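def _prefix_sum_demo() -> None:
    # Self-contained sketch of the idea above: with prefix[i] = sum(a[: i + 1]),
    # any range sum is a single subtraction, an O(1) query after O(n) setup.
    a = [1, 2, 3, 4]
    prefix = [a[0]]
    for x in a[1:]:
        prefix.append(prefix[-1] + x)
    assert prefix[3] - prefix[0] == 9  # sum(a[1:4]) == 2 + 3 + 4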
if __name__ == "__main__":
import doctest
doctest.testmod()
| 326 |
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3
class FailedTestError ( RuntimeError ):
"""simple docstring"""
pass
def gen(shards):  # generator name restored from the upstream script
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}
def main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])
    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers
    gen_kwargs = {"shards": [f"""shard_{shard_idx}""" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))
    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)
    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))
    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"""local_size {local_size} != expected_local_size {expected_local_size}""")
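def _expected_local_size_demo() -> None:
    # Split-arithmetic sketch mirroring main(): 4 shards * 3 items across
    # world_size=2 gives each rank 6 examples; across 5 ranks, ranks 0-1 get 3
    # and ranks 2-4 get 2.
    full = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    assert [full // 2 + int(r < full % 2) for r in range(2)] == [6, 6]
    assert [full // 5 + int(r < full % 5) for r in range(5)] == [3, 3, 2, 2, 2]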
if __name__ == "__main__":
main()
| 326 | 1 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_configure(config ):
config.addinivalue_line(
"markers" , "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested" )
config.addinivalue_line(
"markers" , "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested" )
config.addinivalue_line("markers" , "is_pipeline_test: mark test to run only when pipelines are tested" )
config.addinivalue_line("markers" , "is_staging_test: mark test to run only in the staging environment" )
config.addinivalue_line("markers" , "accelerate_tests: mark test that require accelerate" )
config.addinivalue_line("markers" , "tool_tests: mark the tool tests that are run on their specific schedule" )
def pytest_addoption(parser ):
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser )
def pytest_terminal_summary(terminalreporter ):
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption("--make-reports" )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
def pytest_sessionfinish(session , exitstatus ):
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag('''IGNORE_RESULT''')
OutputChecker = doctest.OutputChecker
class CustomOutputChecker ( OutputChecker ):
"""simple docstring"""
    def check_output(self , want , got , optionflags ):
        '''simple docstring'''
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self , want , got , optionflags )
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
| 326 |
def solution(limit: int = 100_0000) -> int:
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also 4d < a
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
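def _progression_identity_demo() -> None:
    # Identity sketch behind the loop above: for an arithmetic progression
    # (a + d, a, a - d), (a+d)**2 - a**2 - (a-d)**2 == a * (4*d - a), so every
    # representation of n corresponds to a divisor a of n with 4*d == a + n // a.
    a, d = 10, 4
    assert (a + d) ** 2 - a**2 - (a - d) ** 2 == a * (4 * d - a)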
if __name__ == "__main__":
print(F'''{solution() = }''')
| 326 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''adapter_layer''': '''encoder.layers.*.adapter_layer''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
'''pooling_layer.linear''': '''projector''',
'''pooling_layer.projection''': '''classifier''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
'''projector''',
'''classifier''',
]
def read_txt_into_dict(filename ):
    result = {}
    with open(filename , "r" ) as file:
        for line_number, line in enumerate(file ):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
def lowerCAmelCase__( lowercase : str , lowercase : Union[str, Any] , lowercase : List[str] , lowercase : List[str] , lowercase : List[str] ) -> Optional[Any]:
for attribute in key.split("." ):
__snake_case : Tuple = getattr(lowercase , lowercase )
__snake_case : Optional[Any] = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(lowercase ):
__snake_case : Dict = PARAM_MAPPING[full_name.split("." )[-1]]
__snake_case : Any = "param"
if weight_type is not None and weight_type != "param":
__snake_case : Any = getattr(lowercase , lowercase ).shape
elif weight_type is not None and weight_type == "param":
__snake_case : List[str] = hf_pointer
for attribute in hf_param_name.split("." ):
__snake_case : List[Any] = getattr(lowercase , lowercase )
__snake_case : Tuple = shape_pointer.shape
# let's reduce dimension
__snake_case : Union[str, Any] = value[0]
else:
__snake_case : Dict = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
__snake_case : int = value
elif weight_type == "weight_g":
__snake_case : Tuple = value
elif weight_type == "weight_v":
__snake_case : List[str] = value
elif weight_type == "bias":
__snake_case : str = value
elif weight_type == "param":
for attribute in hf_param_name.split("." ):
__snake_case : Any = getattr(lowercase , lowercase )
__snake_case : Dict = value
else:
__snake_case : Tuple = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def lowerCAmelCase__( lowercase : List[str] , lowercase : Optional[int] , lowercase : Optional[int] , lowercase : str , lowercase : str ) -> List[str]:
__snake_case : Optional[Any] = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(lowercase ):
__snake_case : Dict = PARAM_MAPPING[full_name.split("." )[-1]]
__snake_case : Any = "param"
if weight_type is not None and weight_type != "param":
__snake_case : Union[str, Any] = ".".join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
__snake_case : List[str] = ".".join([key, hf_param_name] )
else:
__snake_case : Tuple = key
__snake_case : int = value if "lm_head" in full_key else value[0]
PARAM_MAPPING = {
'''W_a''': '''linear_1.weight''',
'''W_b''': '''linear_2.weight''',
'''b_a''': '''linear_1.bias''',
'''b_b''': '''linear_2.bias''',
'''ln_W''': '''norm.weight''',
'''ln_b''': '''norm.bias''',
}
def lowerCAmelCase__( lowercase : str , lowercase : Dict , lowercase : List[str]=None , lowercase : Tuple=None ) -> Any:
__snake_case : Optional[Any] = False
for key, mapped_key in MAPPING.items():
__snake_case : Optional[Any] = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
__snake_case : Dict = True
if "*" in mapped_key:
__snake_case : Optional[int] = name.split(lowercase )[0].split("." )[-2]
__snake_case : Union[str, Any] = mapped_key.replace("*" , lowercase )
if "weight_g" in name:
__snake_case : Dict = "weight_g"
elif "weight_v" in name:
__snake_case : Dict = "weight_v"
elif "bias" in name:
__snake_case : Dict = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__snake_case : str = "weight"
else:
__snake_case : Any = None
if hf_dict is not None:
rename_dict(lowercase , lowercase , lowercase , lowercase , lowercase )
else:
set_recursively(lowercase , lowercase , lowercase , lowercase , lowercase )
return is_used
return is_used
def lowerCAmelCase__( lowercase : Dict , lowercase : List[str] , lowercase : Tuple ) -> int:
__snake_case : List[Any] = []
__snake_case : Dict = fairseq_model.state_dict()
__snake_case : Dict = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
__snake_case : Tuple = False
if "conv_layers" in name:
load_conv_layer(
lowercase , lowercase , lowercase , lowercase , hf_model.config.feat_extract_norm == "group" , )
__snake_case : int = True
else:
__snake_case : List[str] = load_wavaveca_layer(lowercase , lowercase , lowercase )
if not is_used:
unused_weights.append(lowercase )
logger.warning(f"""Unused weights: {unused_weights}""" )
def lowerCAmelCase__( lowercase : List[Any] , lowercase : Optional[Any] , lowercase : Tuple , lowercase : Optional[int] , lowercase : str ) -> List[Any]:
__snake_case : Optional[int] = full_name.split("conv_layers." )[-1]
__snake_case : Dict = name.split("." )
__snake_case : str = int(items[0] )
__snake_case : Dict = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
__snake_case : int = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
__snake_case : Optional[int] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
__snake_case : Any = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
__snake_case : Optional[int] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(lowercase )
@torch.no_grad()
def lowerCAmelCase__( lowercase : str , lowercase : Any , lowercase : Optional[Any]=None , lowercase : Union[str, Any]=None , lowercase : Any=True , lowercase : int=False ) -> int:
if config_path is not None:
__snake_case : Dict = WavaVecaConfig.from_pretrained(lowercase )
else:
__snake_case : List[Any] = WavaVecaConfig()
if is_seq_class:
__snake_case : Optional[Any] = read_txt_into_dict(lowercase )
__snake_case : List[Any] = idalabel
__snake_case : List[str] = WavaVecaForSequenceClassification(lowercase )
__snake_case : Union[str, Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=lowercase , return_attention_mask=lowercase , )
feature_extractor.save_pretrained(lowercase )
elif is_finetuned:
if dict_path:
__snake_case : int = Dictionary.load(lowercase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__snake_case : Tuple = target_dict.pad_index
__snake_case : Optional[int] = target_dict.bos_index
__snake_case : int = target_dict.eos_index
__snake_case : Optional[int] = len(target_dict.symbols )
__snake_case : str = os.path.join(lowercase , "vocab.json" )
if not os.path.isdir(lowercase ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(lowercase ) )
return
os.makedirs(lowercase , exist_ok=lowercase )
__snake_case : Tuple = target_dict.indices
# fairseq has the <pad> and <s> switched
__snake_case : Tuple = 0
__snake_case : str = 1
with open(lowercase , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(lowercase , lowercase )
__snake_case : int = WavaVecaCTCTokenizer(
lowercase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=lowercase , )
__snake_case : Union[str, Any] = True if config.feat_extract_norm == "layer" else False
__snake_case : List[Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=lowercase , return_attention_mask=lowercase , )
__snake_case : List[Any] = WavaVecaProcessor(feature_extractor=lowercase , tokenizer=lowercase )
processor.save_pretrained(lowercase )
__snake_case : Union[str, Any] = WavaVecaForCTC(lowercase )
else:
__snake_case : int = WavaVecaForPreTraining(lowercase )
if is_finetuned or is_seq_class:
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
        task_arg = argparse.Namespace(task="audio_pretraining" )
        task = fairseq.tasks.setup_task(task_arg )
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=task )
__snake_case : Optional[int] = model[0].eval()
recursively_load_weights(lowercase , lowercase , not is_finetuned )
hf_wavavec.save_pretrained(lowercase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
parser.add_argument(
'''--is_seq_class''',
action='''store_true''',
help='''Whether the model to convert is a fine-tuned sequence classification model or not''',
)
_UpperCamelCase = parser.parse_args()
_UpperCamelCase = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 326 |
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """
    Returns every way the string `target` can be constructed by concatenating
    elements of `word_bank` (elements may be reused).
    """
    word_bank = word_bank or []
    # create a table with one entry per prefix length of `target`
    table_size: int = len(target) + 1

    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])

    # seed value: the empty prefix has exactly one (empty) combination
    table[0] = [[]]

    # iterate through the indices
    for i in range(table_size):
        # only extend positions that are actually reachable
        if table[i] != []:
            for word in word_bank:
                # slice condition: does `word` start at position i?
                if target[i : i + len(word)] == word:
                    # add the word to every combination the current position holds,
                    # then push those combinations to table[i + len(word)]
                    new_combinations: list[list[str]] = [[word, *way] for way in table[i]]
                    table[i + len(word)] += new_combinations

    # combinations are built in reverse order, so reverse them for better output
    for combination in table[len(target)]:
        combination.reverse()

    return table[len(target)]
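

# A minimal sanity check (an illustrative addition, not part of the original
# module): every combination returned must concatenate back to the target.
def _verify_combinations(target: str, word_bank: list[str]) -> bool:
    return all("".join(way) == target for way in all_construct(target, word_bank))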
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
| 326 | 1 |
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """
    Find all the valid positions a knight can move to from the current position
    on an n x n board.
    """
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []

    for inner_position in positions:
        y_test, x_test = inner_position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(inner_position)

    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """
    Check if the board (matrix) has been completely filled with non-zero values.
    """
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """
    Helper function to solve the knight tour problem via backtracking.
    """
    if is_complete(board):
        return True

    for position in get_valid_pos(pos, len(board)):
        y, x = position

        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0

    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """
    Find a solution for an open knight tour on an n x n board, where each cell
    records the step at which the knight visited it.
    """
    board = [[0 for i in range(n)] for j in range(n)]

    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0

    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)
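

# Illustrative helper (an addition, not in the original module): pretty-print
# a solved board, showing the step at which the knight visited each cell.
def _print_tour(n: int) -> None:
    for row in open_knight_tour(n):
        print(" ".join(f"{cell:3d}" for cell in row))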
if __name__ == "__main__":
import doctest
doctest.testmod()
| 326 |
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=2, seq_length=56, is_training=True, use_attention_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2,
        num_attention_heads=2, intermediate_size=7, hidden_act="gelu_new", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
        type_sequence_label_size=2, initializer_range=0.02, num_choices=4, attention_type="block_sparse",
        use_bias=True, rescale_embeddings=False, block_size=2, num_random_blocks=3,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BigBirdConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, attention_type=self.attention_type, block_size=self.block_size, num_random_blocks=self.num_random_blocks, use_bias=self.use_bias, rescale_embeddings=self.rescale_embeddings,
        )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    test_attn_probs = False
    test_mismatched_shapes = False

    def setUp(self):
        self.model_tester = FlaxBigBirdModelTester(self)

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
        super().test_from_pretrained_save_pretrained()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
        super().test_from_pretrained_with_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
        super().test_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
        super().test_hidden_states_output()

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/bigbird-roberta-base")
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if self.test_attn_probs:
            super().test_attention_outputs()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(input_ids, attention_mask=None, **kwargs):
                    return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
        # BigBird's block-sparse attention returns no attention probabilities in Flax,
        # so skip comparing attention outputs between the PyTorch and Flax models.
        if name.startswith("outputs.attentions"):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)
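

# Usage note (an assumed invocation, not part of the original file): with the
# usual transformers repository layout, this suite can be run via pytest, e.g.
#   python -m pytest tests/models/big_bird/test_modeling_flax_big_bird.py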
| 326 | 1 |